2011-03-31 Thiago Jung Bauermann <bauerman@br.ibm.com>
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
4 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 Contributed by Cygnus Support.
8
9 This file is part of GDB.
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3 of the License, or
14 (at your option) any later version.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23
24 #include "defs.h"
25 #include <errno.h>
26 #include "gdb_string.h"
27 #include "target.h"
28 #include "gdbcmd.h"
29 #include "symtab.h"
30 #include "inferior.h"
31 #include "bfd.h"
32 #include "symfile.h"
33 #include "objfiles.h"
34 #include "gdb_wait.h"
35 #include "dcache.h"
36 #include <signal.h>
37 #include "regcache.h"
38 #include "gdb_assert.h"
39 #include "gdbcore.h"
40 #include "exceptions.h"
41 #include "target-descriptions.h"
42 #include "gdbthread.h"
43 #include "solib.h"
44 #include "exec.h"
45 #include "inline-frame.h"
46 #include "tracepoint.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
56
57 static void tcomplain (void) ATTRIBUTE_NORETURN;
58
59 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
60
61 static int return_zero (void);
62
63 static int return_one (void);
64
65 static int return_minus_one (void);
66
67 void target_ignore (void);
68
69 static void target_command (char *, int);
70
71 static struct target_ops *find_default_run_target (char *);
72
73 static LONGEST default_xfer_partial (struct target_ops *ops,
74 enum target_object object,
75 const char *annex, gdb_byte *readbuf,
76 const gdb_byte *writebuf,
77 ULONGEST offset, LONGEST len);
78
79 static LONGEST current_xfer_partial (struct target_ops *ops,
80 enum target_object object,
81 const char *annex, gdb_byte *readbuf,
82 const gdb_byte *writebuf,
83 ULONGEST offset, LONGEST len);
84
85 static LONGEST target_xfer_partial (struct target_ops *ops,
86 enum target_object object,
87 const char *annex,
88 void *readbuf, const void *writebuf,
89 ULONGEST offset, LONGEST len);
90
91 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
92 ptid_t ptid);
93
94 static void init_dummy_target (void);
95
96 static struct target_ops debug_target;
97
98 static void debug_to_open (char *, int);
99
100 static void debug_to_prepare_to_store (struct regcache *);
101
102 static void debug_to_files_info (struct target_ops *);
103
104 static int debug_to_insert_breakpoint (struct gdbarch *,
105 struct bp_target_info *);
106
107 static int debug_to_remove_breakpoint (struct gdbarch *,
108 struct bp_target_info *);
109
110 static int debug_to_can_use_hw_breakpoint (int, int, int);
111
112 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
116 struct bp_target_info *);
117
118 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
119 struct expression *);
120
121 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
122 struct expression *);
123
124 static int debug_to_stopped_by_watchpoint (void);
125
126 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
127
128 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
129 CORE_ADDR, CORE_ADDR, int);
130
131 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
132
133 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
134 struct expression *);
135
136 static void debug_to_terminal_init (void);
137
138 static void debug_to_terminal_inferior (void);
139
140 static void debug_to_terminal_ours_for_output (void);
141
142 static void debug_to_terminal_save_ours (void);
143
144 static void debug_to_terminal_ours (void);
145
146 static void debug_to_terminal_info (char *, int);
147
148 static void debug_to_load (char *, int);
149
150 static int debug_to_can_run (void);
151
152 static void debug_to_notice_signals (ptid_t);
153
154 static void debug_to_stop (ptid_t);
155
156 /* Pointer to array of target architecture structures; the size of the
157 array; the current index into the array; the allocated size of the
158 array. */
159 struct target_ops **target_structs;
160 unsigned target_struct_size;
161 unsigned target_struct_index;
162 unsigned target_struct_allocsize;
163 #define DEFAULT_ALLOCSIZE 10
164
165 /* The initial current target, so that there is always a semi-valid
166 current target. */
167
168 static struct target_ops dummy_target;
169
170 /* Top of target stack. */
171
172 static struct target_ops *target_stack;
173
174 /* The target structure we are currently using to talk to a process
175 or file or whatever "inferior" we have. */
176
177 struct target_ops current_target;
178
179 /* Command list for target. */
180
181 static struct cmd_list_element *targetlist = NULL;
182
183 /* Nonzero if we should trust readonly sections from the
184 executable when reading memory. */
185
186 static int trust_readonly = 0;
187
188 /* Nonzero if we should show true memory content including
189 memory breakpoint inserted by gdb. */
190
191 static int show_memory_breakpoints = 0;
192
193 /* These globals control whether GDB attempts to perform these
194 operations; they are useful for targets that need to prevent
195 inadvertant disruption, such as in non-stop mode. */
196
197 int may_write_registers = 1;
198
199 int may_write_memory = 1;
200
201 int may_insert_breakpoints = 1;
202
203 int may_insert_tracepoints = 1;
204
205 int may_insert_fast_tracepoints = 1;
206
207 int may_stop = 1;
208
209 /* Non-zero if we want to see trace of target level stuff. */
210
211 static int targetdebug = 0;
/* Implement the "show debug target" command: report the current value
   of the targetdebug knob to FILE.  VALUE is the pre-rendered setting
   string supplied by the add_setshow machinery.  */
static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
218
219 static void setup_target_debug (void);
220
221 /* The option sets this. */
222 static int stack_cache_enabled_p_1 = 1;
223 /* And set_stack_cache_enabled_p updates this.
224 The reason for the separation is so that we don't flush the cache for
225 on->on transitions. */
226 static int stack_cache_enabled_p = 1;
227
228 /* This is called *after* the stack-cache has been set.
229 Flush the cache for off->on and on->off transitions.
230 There's no real need to flush the cache for on->off transitions,
231 except cleanliness. */
232
233 static void
234 set_stack_cache_enabled_p (char *args, int from_tty,
235 struct cmd_list_element *c)
236 {
237 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
238 target_dcache_invalidate ();
239
240 stack_cache_enabled_p = stack_cache_enabled_p_1;
241 }
242
/* Implement "show stack-cache": print whether the stack-access dcache
   is enabled.  VALUE is the pre-rendered setting string.  */
static void
show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
}
249
250 /* Cache of memory operations, to speed up remote access. */
251 static DCACHE *target_dcache;
252
/* Invalidate the target dcache.  Called whenever target memory may
   have changed behind the cache's back (e.g. after a load or when the
   stack-cache setting is toggled).  */

void
target_dcache_invalidate (void)
{
  dcache_invalidate (target_dcache);
}
260
/* The user just typed 'target' without the name of a target.
   Prefix command handler; the real work is done by the subcommands
   registered under "target" by add_target.  */

static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name).  Try `help target'\n",
		  gdb_stdout);
}
269
270 /* Default target_has_* methods for process_stratum targets. */
271
272 int
273 default_child_has_all_memory (struct target_ops *ops)
274 {
275 /* If no inferior selected, then we can't read memory here. */
276 if (ptid_equal (inferior_ptid, null_ptid))
277 return 0;
278
279 return 1;
280 }
281
282 int
283 default_child_has_memory (struct target_ops *ops)
284 {
285 /* If no inferior selected, then we can't read memory here. */
286 if (ptid_equal (inferior_ptid, null_ptid))
287 return 0;
288
289 return 1;
290 }
291
292 int
293 default_child_has_stack (struct target_ops *ops)
294 {
295 /* If no inferior selected, there's no stack. */
296 if (ptid_equal (inferior_ptid, null_ptid))
297 return 0;
298
299 return 1;
300 }
301
302 int
303 default_child_has_registers (struct target_ops *ops)
304 {
305 /* Can't read registers from no inferior. */
306 if (ptid_equal (inferior_ptid, null_ptid))
307 return 0;
308
309 return 1;
310 }
311
312 int
313 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
314 {
315 /* If there's no thread selected, then we can't make it run through
316 hoops. */
317 if (ptid_equal (the_ptid, null_ptid))
318 return 0;
319
320 return 1;
321 }
322
323
324 int
325 target_has_all_memory_1 (void)
326 {
327 struct target_ops *t;
328
329 for (t = current_target.beneath; t != NULL; t = t->beneath)
330 if (t->to_has_all_memory (t))
331 return 1;
332
333 return 0;
334 }
335
336 int
337 target_has_memory_1 (void)
338 {
339 struct target_ops *t;
340
341 for (t = current_target.beneath; t != NULL; t = t->beneath)
342 if (t->to_has_memory (t))
343 return 1;
344
345 return 0;
346 }
347
348 int
349 target_has_stack_1 (void)
350 {
351 struct target_ops *t;
352
353 for (t = current_target.beneath; t != NULL; t = t->beneath)
354 if (t->to_has_stack (t))
355 return 1;
356
357 return 0;
358 }
359
360 int
361 target_has_registers_1 (void)
362 {
363 struct target_ops *t;
364
365 for (t = current_target.beneath; t != NULL; t = t->beneath)
366 if (t->to_has_registers (t))
367 return 1;
368
369 return 0;
370 }
371
372 int
373 target_has_execution_1 (ptid_t the_ptid)
374 {
375 struct target_ops *t;
376
377 for (t = current_target.beneath; t != NULL; t = t->beneath)
378 if (t->to_has_execution (t, the_ptid))
379 return 1;
380
381 return 0;
382 }
383
/* Convenience wrapper: does any target have execution for the
   currently selected thread (inferior_ptid)?  */
int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
389
/* Add a possible target architecture to the list.  Fills in default
   implementations for the "must have" methods, grows the global
   target_structs registry (doubling on overflow), and registers a
   "target <shortname>" subcommand (creating the "target" prefix
   command the first time through).  T must outlive GDB -- only a
   pointer is stored.  */

void
add_target (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* The has_* predicates default to "no".  The casts adapt
     return_zero's (void) signature to each method's signature; the
     result is only ever called through the cast-to type.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Lazily allocate, then grow, the registry array.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* First call creates the "target" prefix command itself.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
}
439
440 /* Stub functions */
441
/* Do-nothing stub used (via casts) as the default for many optional
   target methods.  */
void
target_ignore (void)
{
}
446
447 void
448 target_kill (void)
449 {
450 struct target_ops *t;
451
452 for (t = current_target.beneath; t != NULL; t = t->beneath)
453 if (t->to_kill != NULL)
454 {
455 if (targetdebug)
456 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
457
458 t->to_kill (t);
459 return;
460 }
461
462 noprocess ();
463 }
464
/* Load ARG into the inferior (the "load" command).  The dcache is
   invalidated first because loading rewrites target memory behind
   the cache's back.  */
void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (arg, from_tty);
}
471
472 void
473 target_create_inferior (char *exec_file, char *args,
474 char **env, int from_tty)
475 {
476 struct target_ops *t;
477
478 for (t = current_target.beneath; t != NULL; t = t->beneath)
479 {
480 if (t->to_create_inferior != NULL)
481 {
482 t->to_create_inferior (t, exec_file, args, env, from_tty);
483 if (targetdebug)
484 fprintf_unfiltered (gdb_stdlog,
485 "target_create_inferior (%s, %s, xxx, %d)\n",
486 exec_file, args, from_tty);
487 return;
488 }
489 }
490
491 internal_error (__FILE__, __LINE__,
492 _("could not find a target to create inferior"));
493 }
494
495 void
496 target_terminal_inferior (void)
497 {
498 /* A background resume (``run&'') should leave GDB in control of the
499 terminal. Use target_can_async_p, not target_is_async_p, since at
500 this point the target is not async yet. However, if sync_execution
501 is not set, we know it will become async prior to resume. */
502 if (target_can_async_p () && !sync_execution)
503 return;
504
505 /* If GDB is resuming the inferior in the foreground, install
506 inferior's terminal modes. */
507 (*current_target.to_terminal_inferior) ();
508 }
509
/* Default deprecated_xfer_memory implementation: refuse every
   transfer.  Sets errno to EIO and reports zero bytes handled.  */
static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
517
/* Default for methods that the current target simply does not
   support: raise an error naming the target.  Does not return.  */
static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
524
/* Raise an error for operations that require a live process when
   there is none.  Does not return.  */
void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
530
/* Default to_terminal_info implementation: there is no saved
   terminal state to report.  */
static void
default_terminal_info (char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
536
/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
548
549 /* Go through the target stack from top to bottom, copying over zero
550 entries in current_target, then filling in still empty entries. In
551 effect, we are doing class inheritance through the pushed target
552 vectors.
553
554 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
555 is currently implemented, is that it discards any knowledge of
556 which target an inherited method originally belonged to.
557 Consequently, new new target methods should instead explicitly and
558 locally search the target stack for the target that can handle the
559 request. */
560
/* Rebuild current_target by flattening the target stack: zero it,
   copy the topmost non-NULL implementation of each inheritable method
   down the stack (class inheritance by copying), then fill any method
   still NULL with a defaulted stub.  Finally point current_target.beneath
   at the stack so non-inherited methods can be searched for directly.
   Must be called whenever the target stack changes.  */
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Copy FIELD from TARGET only if no shallower target already
     supplied it -- topmost target wins.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      INHERIT (to_post_attach, t);
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      INHERIT (to_prepare_to_store, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_files_info, t);
      INHERIT (to_insert_breakpoint, t);
      INHERIT (to_remove_breakpoint, t);
      INHERIT (to_can_use_hw_breakpoint, t);
      INHERIT (to_insert_hw_breakpoint, t);
      INHERIT (to_remove_hw_breakpoint, t);
      /* Do not inherit to_ranged_break_num_registers.  */
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      INHERIT (to_stopped_data_address, t);
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      INHERIT (to_stopped_by_watchpoint, t);
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill.  */
      INHERIT (to_load, t);
      /* Do no inherit to_create_inferior.  */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork.  */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      INHERIT (to_notice_signals, t);
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_thread_name, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      INHERIT (to_rcmd, t);
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      INHERIT (to_can_async_p, t);
      INHERIT (to_is_async_p, t);
      INHERIT (to_async, t);
      INHERIT (to_async_mask, t);
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_magic, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  The casts
     adapt the generic stub (return_zero, return_one, return_minus_one,
     target_ignore, tcomplain, nomemory) to each slot's exact signature;
     each stub ignores its arguments, so the call through the cast-to
     type is what the rest of GDB relies on.  */

#define de_fault(field, value) \
  if (!current_target.field)               \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_post_attach,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_prepare_to_store,
	    (void (*) (struct regcache *))
	    noprocess);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_files_info,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_insert_breakpoint,
	    memory_insert_breakpoint);
  de_fault (to_remove_breakpoint,
	    memory_remove_breakpoint);
  de_fault (to_can_use_hw_breakpoint,
	    (int (*) (int, int, int))
	    return_zero);
  de_fault (to_insert_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_remove_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_insert_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_remove_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_stopped_by_watchpoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_stopped_data_address,
	    (int (*) (struct target_ops *, CORE_ADDR *))
	    return_zero);
  de_fault (to_watchpoint_addr_within_range,
	    default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
	    default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_zero);
  de_fault (to_terminal_init,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_inferior,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours_for_output,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_save_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_info,
	    default_terminal_info);
  de_fault (to_load,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_post_startup_inferior,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_insert_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (int, int, int, int, int *))
	    return_one);
  de_fault (to_has_exited,
	    (int (*) (int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    return_zero);
  de_fault (to_notice_signals,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct thread_info *))
	    return_zero);
  de_fault (to_thread_name,
	    (char *(*) (struct thread_info *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (ptid_t))
	    target_ignore);
  /* to_xfer_partial is deliberately not inherited; the squashed vector
     always dispatches down the stack via current_xfer_partial.  */
  current_target.to_xfer_partial = current_xfer_partial;
  de_fault (to_rcmd,
	    (void (*) (char *, struct ui_file *))
	    tcomplain);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (int))
	    return_zero);
  de_fault (to_async,
	    (void (*) (void (*) (enum inferior_event_type, void*), void*))
	    tcomplain);
  de_fault (to_async_mask,
	    (int (*) (int))
	    return_one);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  /* to_read_description is searched for down the stack, never via the
     squashed vector, so keep it NULL here.  */
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (void))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct breakpoint *))
	    tcomplain);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct trace_state_variable *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct trace_status *))
	    return_minus_one);
  de_fault (to_trace_stop,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_get_tib_address,
	    (int (*) (ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (void))
	    tcomplain);
#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
900
/* Push a new target type into the stack of the existing target accessors,
   possibly superseding some of the existing accessors.

   Rather than allow an empty stack, we always have the dummy target at
   the bottom stratum, so we can call the function vectors without
   checking them.

   Targets are ordered by stratum (highest first); any target already
   at T's stratum is closed and removed before T is spliced in.  */

void
push_target (struct target_ops *t)
{
  /* CUR walks the chain as a pointer-to-link so the splice below can
     rewrite whichever link points at the insertion point.  */
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp, 0);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* The flattened current_target must be rebuilt after any stack
     change.  */
  update_current_target ();
}
951
/* Remove a target_ops vector from the stack, wherever it may be.
   Return how many times it was removed (0 or 1).  The target is closed
   before being unchained.  Unpushing the dummy target is an internal
   error -- it must always remain at the bottom.  */

int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  if ((*cur) == NULL)
    return 0;			/* Didn't find target_ops, quit now.  */

  /* NOTE: cagney/2003-12-06: In '94 the close call was made
     unconditional by moving it to before the above check that the
     target was in the target stack (something about "Change the way
     pushing and popping of targets work to support target overlays
     and inheritance").  This doesn't make much sense - only open
     targets should be closed.  */
  target_close (t, 0);

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  /* Rebuild the flattened current_target vector.  */
  update_current_target ();

  return 1;
}
994
995 void
996 pop_target (void)
997 {
998 target_close (target_stack, 0); /* Let it clean up. */
999 if (unpush_target (target_stack) == 1)
1000 return;
1001
1002 fprintf_unfiltered (gdb_stderr,
1003 "pop_target couldn't find target %s\n",
1004 current_target.to_shortname);
1005 internal_error (__FILE__, __LINE__,
1006 _("failed internal consistency check"));
1007 }
1008
/* Pop every target whose stratum is strictly above ABOVE_STRATUM,
   passing QUITTING through to target_close.  It is an internal error
   if a target cannot be unpushed.

   NOTE(review): target_close is called here with QUITTING and then
   again (with 0) inside unpush_target, so each popped target is
   closed twice -- same double-close pattern pop_target had; confirm
   whether backends tolerate it.  */
void
pop_all_targets_above (enum strata above_stratum, int quitting)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      target_close (target_stack, quitting);
      if (!unpush_target (target_stack))
	{
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
1026
/* Pop every target except the bottom dummy target, passing QUITTING
   through to target_close.  */
void
pop_all_targets (int quitting)
{
  pop_all_targets_above (dummy_stratum, quitting);
}
1032
1033 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1034
1035 int
1036 target_is_pushed (struct target_ops *t)
1037 {
1038 struct target_ops **cur;
1039
1040 /* Check magic number. If wrong, it probably means someone changed
1041 the struct definition, but not all the places that initialize one. */
1042 if (t->to_magic != OPS_MAGIC)
1043 {
1044 fprintf_unfiltered (gdb_stderr,
1045 "Magic number of %s target struct wrong\n",
1046 t->to_shortname);
1047 internal_error (__FILE__, __LINE__,
1048 _("failed internal consistency check"));
1049 }
1050
1051 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1052 if (*cur == t)
1053 return 1;
1054
1055 return 0;
1056 }
1057
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.

   Requires both a target implementing to_get_thread_local_address
   and a gdbarch able to fetch the TLS load module address; throws a
   human-readable error otherwise.  TLS-specific failures from the
   lookup are translated into user-facing error messages here; any
   other exception is re-thrown to a higher catcher.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile: assigned inside TRY_CATCH, read after it.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the topmost target that can translate TLS offsets.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  /* The message differs depending on whether OBJFILE is the
	     main executable or a shared library.  */
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile->name);
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile->name);
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile->name, target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile->name, target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile->name, ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile->name, ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; let a higher catcher see it.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1154
#undef MIN
/* Smaller of A and B.  NOTE: one argument is evaluated twice, so A
   and B must be free of side effects.  */
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1157
1158 /* target_read_string -- read a null terminated string, up to LEN bytes,
1159 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1160 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1161 is responsible for freeing it. Return the number of bytes successfully
1162 read. */
1163
1164 int
1165 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1166 {
1167 int tlen, origlen, offset, i;
1168 gdb_byte buf[4];
1169 int errcode = 0;
1170 char *buffer;
1171 int buffer_allocated;
1172 char *bufptr;
1173 unsigned int nbytes_read = 0;
1174
1175 gdb_assert (string);
1176
1177 /* Small for testing. */
1178 buffer_allocated = 4;
1179 buffer = xmalloc (buffer_allocated);
1180 bufptr = buffer;
1181
1182 origlen = len;
1183
1184 while (len > 0)
1185 {
1186 tlen = MIN (len, 4 - (memaddr & 3));
1187 offset = memaddr & 3;
1188
1189 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1190 if (errcode != 0)
1191 {
1192 /* The transfer request might have crossed the boundary to an
1193 unallocated region of memory. Retry the transfer, requesting
1194 a single byte. */
1195 tlen = 1;
1196 offset = 0;
1197 errcode = target_read_memory (memaddr, buf, 1);
1198 if (errcode != 0)
1199 goto done;
1200 }
1201
1202 if (bufptr - buffer + tlen > buffer_allocated)
1203 {
1204 unsigned int bytes;
1205
1206 bytes = bufptr - buffer;
1207 buffer_allocated *= 2;
1208 buffer = xrealloc (buffer, buffer_allocated);
1209 bufptr = buffer + bytes;
1210 }
1211
1212 for (i = 0; i < tlen; i++)
1213 {
1214 *bufptr++ = buf[i + offset];
1215 if (buf[i + offset] == '\000')
1216 {
1217 nbytes_read += i + 1;
1218 goto done;
1219 }
1220 }
1221
1222 memaddr += tlen;
1223 len -= tlen;
1224 nbytes_read += tlen;
1225 }
1226 done:
1227 *string = buffer;
1228 if (errnop != NULL)
1229 *errnop = errcode;
1230 return nbytes_read;
1231 }
1232
1233 struct target_section_table *
1234 target_get_section_table (struct target_ops *target)
1235 {
1236 struct target_ops *t;
1237
1238 if (targetdebug)
1239 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1240
1241 for (t = target; t != NULL; t = t->beneath)
1242 if (t->to_get_section_table != NULL)
1243 return (*t->to_get_section_table) (t);
1244
1245 return NULL;
1246 }
1247
1248 /* Find a section containing ADDR. */
1249
1250 struct target_section *
1251 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1252 {
1253 struct target_section_table *table = target_get_section_table (target);
1254 struct target_section *secp;
1255
1256 if (table == NULL)
1257 return NULL;
1258
1259 for (secp = table->sections; secp < table->sections_end; secp++)
1260 {
1261 if (addr >= secp->addr && addr < secp->endaddr)
1262 return secp;
1263 }
1264 return NULL;
1265 }
1266
1267 /* Read memory from the live target, even if currently inspecting a
1268 traceframe. The return is the same as that of target_read. */
1269
1270 static LONGEST
1271 target_read_live_memory (enum target_object object,
1272 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1273 {
1274 int ret;
1275 struct cleanup *cleanup;
1276
1277 /* Switch momentarily out of tfind mode so to access live memory.
1278 Note that this must not clear global state, such as the frame
1279 cache, which must still remain valid for the previous traceframe.
1280 We may be _building_ the frame cache at this point. */
1281 cleanup = make_cleanup_restore_traceframe_number ();
1282 set_traceframe_number (-1);
1283
1284 ret = target_read (current_target.beneath, object, NULL,
1285 myaddr, memaddr, len);
1286
1287 do_cleanups (cleanup);
1288 return ret;
1289 }
1290
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   Returns 0 (nothing transferred) when MEMADDR does not fall in a
   read-only section.  A transfer that straddles a section boundary is
   truncated at the boundary.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static LONGEST
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   LONGEST len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only read-only sections qualify for this fallback.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len);
		}
	    }
	}
    }

  return 0;
}
1345
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   The transfer is attempted via several mechanisms in order: unmapped
   overlay sections, trusted read-only sections of the executable,
   live memory for read-only ranges while inspecting a traceframe, the
   dcache, and finally the target stack itself.  */

static LONGEST
memory_xfer_partial (struct target_ops *ops, enum target_object object,
		     void *readbuf, const void *writebuf, ULONGEST memaddr,
		     LONGEST len)
{
  LONGEST res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* Zero length requests are ok and require no work.  */
  if (len == 0)
    return 0;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Trim the request so it stops just before the first
		     available range.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr, len);
	      if (res > 0)
		return res;

	      /* No use trying further, we know some memory starting
		 at MEMADDR isn't available.  */
	      return -1;
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  First clip the request to the
     containing memory region.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return -1;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return -1;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return -1;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
    {
      if (readbuf != NULL)
	res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
				  reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	res = dcache_xfer_memory (ops, target_dcache, memaddr,
				  (void *) writebuf,
				  reg_len, 1);
      if (res <= 0)
	return -1;
      else
	{
	  /* Hide breakpoint shadow bytes from the caller unless
	     "show memory breakpoints" is in effect.  */
	  if (readbuf && !show_memory_breakpoints)
	    breakpoint_restore_shadows (readbuf, memaddr, reg_len);
	  return res;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  do
    {
      res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				  readbuf, writebuf, memaddr, reg_len);
      if (res > 0)
	break;

      /* We want to continue past core files to executables, but not
	 past a running target's memory.  */
      if (ops->to_has_all_memory (ops))
	break;

      ops = ops->beneath;
    }
  while (ops != NULL);

  if (res > 0 && readbuf != NULL && !show_memory_breakpoints)
    breakpoint_restore_shadows (readbuf, memaddr, reg_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res > 0
      && inf != NULL
      && writebuf != NULL
      && !region->attrib.cache
      && stack_cache_enabled_p
      && object != TARGET_OBJECT_STACK_MEMORY)
    {
      dcache_update (target_dcache, memaddr, (void *) writebuf, res);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1567
/* Cleanup callback: restore show_memory_breakpoints to the value
   encoded in ARG (a pointer-sized integer; see
   make_show_memory_breakpoints_cleanup).  */
static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}
1573
1574 struct cleanup *
1575 make_show_memory_breakpoints_cleanup (int show)
1576 {
1577 int current = show_memory_breakpoints;
1578
1579 show_memory_breakpoints = show;
1580 return make_cleanup (restore_show_memory_breakpoints,
1581 (void *) (uintptr_t) current);
1582 }
1583
/* For docs see target.h, to_xfer_partial.

   Central dispatch point for all partial transfers: enforces the
   "may-write-memory" setting, routes memory objects through
   memory_xfer_partial, and emits the "set debug target" trace.  */

static LONGEST
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     void *readbuf, const void *writebuf,
		     ULONGEST offset, LONGEST len)
{
  LONGEST retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Honor the user's "set may-write-memory" control.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len);
  else
    {
      enum target_object raw_object = object;

      /* If this is a raw memory transfer, request the normal
	 memory object from other layers.  */
      if (raw_object == TARGET_OBJECT_RAW_MEMORY)
	raw_object = TARGET_OBJECT_MEMORY;

      retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
				     writebuf, offset, len);
    }

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  plongest (len), plongest (retval));

      /* Dump the transferred bytes (whichever buffer was used).  */
      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval > 0 && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < retval; i++)
	    {
	      /* Break the line at 16-byte-aligned buffer addresses; at
		 debug level 1 only the first line is printed.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }
  return retval;
}
1663
1664 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1665 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1666 if any error occurs.
1667
1668 If an error occurs, no guarantee is made about the contents of the data at
1669 MYADDR. In particular, the caller should not depend upon partial reads
1670 filling the buffer with good data. There is no way for the caller to know
1671 how much good data might have been transfered anyway. Callers that can
1672 deal with partial reads should call target_read (which will retry until
1673 it makes no progress, and then return how much was transferred). */
1674
1675 int
1676 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1677 {
1678 /* Dispatch to the topmost target, not the flattened current_target.
1679 Memory accesses check target->to_has_(all_)memory, and the
1680 flattened target doesn't inherit those. */
1681 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1682 myaddr, memaddr, len) == len)
1683 return 0;
1684 else
1685 return EIO;
1686 }
1687
1688 /* Like target_read_memory, but specify explicitly that this is a read from
1689 the target's stack. This may trigger different cache behavior. */
1690
1691 int
1692 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1693 {
1694 /* Dispatch to the topmost target, not the flattened current_target.
1695 Memory accesses check target->to_has_(all_)memory, and the
1696 flattened target doesn't inherit those. */
1697
1698 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1699 myaddr, memaddr, len) == len)
1700 return 0;
1701 else
1702 return EIO;
1703 }
1704
1705 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1706 Returns either 0 for success or an errno value if any error occurs.
1707 If an error occurs, no guarantee is made about how much data got written.
1708 Callers that can deal with partial writes should call target_write. */
1709
1710 int
1711 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1712 {
1713 /* Dispatch to the topmost target, not the flattened current_target.
1714 Memory accesses check target->to_has_(all_)memory, and the
1715 flattened target doesn't inherit those. */
1716 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1717 myaddr, memaddr, len) == len)
1718 return 0;
1719 else
1720 return EIO;
1721 }
1722
1723 /* Fetch the target's memory map. */
1724
1725 VEC(mem_region_s) *
1726 target_memory_map (void)
1727 {
1728 VEC(mem_region_s) *result;
1729 struct mem_region *last_one, *this_one;
1730 int ix;
1731 struct target_ops *t;
1732
1733 if (targetdebug)
1734 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1735
1736 for (t = current_target.beneath; t != NULL; t = t->beneath)
1737 if (t->to_memory_map != NULL)
1738 break;
1739
1740 if (t == NULL)
1741 return NULL;
1742
1743 result = t->to_memory_map (t);
1744 if (result == NULL)
1745 return NULL;
1746
1747 qsort (VEC_address (mem_region_s, result),
1748 VEC_length (mem_region_s, result),
1749 sizeof (struct mem_region), mem_region_cmp);
1750
1751 /* Check that regions do not overlap. Simultaneously assign
1752 a numbering for the "mem" commands to use to refer to
1753 each region. */
1754 last_one = NULL;
1755 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1756 {
1757 this_one->number = ix;
1758
1759 if (last_one && last_one->hi > this_one->lo)
1760 {
1761 warning (_("Overlapping regions in memory map: ignoring"));
1762 VEC_free (mem_region_s, result);
1763 return NULL;
1764 }
1765 last_one = this_one;
1766 }
1767
1768 return result;
1769 }
1770
1771 void
1772 target_flash_erase (ULONGEST address, LONGEST length)
1773 {
1774 struct target_ops *t;
1775
1776 for (t = current_target.beneath; t != NULL; t = t->beneath)
1777 if (t->to_flash_erase != NULL)
1778 {
1779 if (targetdebug)
1780 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1781 hex_string (address), phex (length, 0));
1782 t->to_flash_erase (t, address, length);
1783 return;
1784 }
1785
1786 tcomplain ();
1787 }
1788
1789 void
1790 target_flash_done (void)
1791 {
1792 struct target_ops *t;
1793
1794 for (t = current_target.beneath; t != NULL; t = t->beneath)
1795 if (t->to_flash_done != NULL)
1796 {
1797 if (targetdebug)
1798 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1799 t->to_flash_done (t);
1800 return;
1801 }
1802
1803 tcomplain ();
1804 }
1805
/* Implement "show trust-readonly-sections": report the current
   setting VALUE to FILE.  */
static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1814
1815 /* More generic transfers. */
1816
1817 static LONGEST
1818 default_xfer_partial (struct target_ops *ops, enum target_object object,
1819 const char *annex, gdb_byte *readbuf,
1820 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1821 {
1822 if (object == TARGET_OBJECT_MEMORY
1823 && ops->deprecated_xfer_memory != NULL)
1824 /* If available, fall back to the target's
1825 "deprecated_xfer_memory" method. */
1826 {
1827 int xfered = -1;
1828
1829 errno = 0;
1830 if (writebuf != NULL)
1831 {
1832 void *buffer = xmalloc (len);
1833 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1834
1835 memcpy (buffer, writebuf, len);
1836 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1837 1/*write*/, NULL, ops);
1838 do_cleanups (cleanup);
1839 }
1840 if (readbuf != NULL)
1841 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1842 0/*read*/, NULL, ops);
1843 if (xfered > 0)
1844 return xfered;
1845 else if (xfered == 0 && errno == 0)
1846 /* "deprecated_xfer_memory" uses 0, cross checked against
1847 ERRNO as one indication of an error. */
1848 return 0;
1849 else
1850 return -1;
1851 }
1852 else if (ops->beneath != NULL)
1853 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1854 readbuf, writebuf, offset, len);
1855 else
1856 return -1;
1857 }
1858
1859 /* The xfer_partial handler for the topmost target. Unlike the default,
1860 it does not need to handle memory specially; it just passes all
1861 requests down the stack. */
1862
1863 static LONGEST
1864 current_xfer_partial (struct target_ops *ops, enum target_object object,
1865 const char *annex, gdb_byte *readbuf,
1866 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1867 {
1868 if (ops->beneath != NULL)
1869 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1870 readbuf, writebuf, offset, len);
1871 else
1872 return -1;
1873 }
1874
/* Target vector read/write partial wrapper functions.  */

/* Convenience wrapper: a read-only partial transfer (no write
   buffer).  See to_xfer_partial in target.h for the contract.  */
static LONGEST
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, LONGEST len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
}
1885
/* Convenience wrapper: a write-only partial transfer (no read
   buffer).  See to_xfer_partial in target.h for the contract.  */
static LONGEST
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
}
1894
1895 /* Wrappers to perform the full transfer. */
1896
1897 /* For docs on target_read see target.h. */
1898
1899 LONGEST
1900 target_read (struct target_ops *ops,
1901 enum target_object object,
1902 const char *annex, gdb_byte *buf,
1903 ULONGEST offset, LONGEST len)
1904 {
1905 LONGEST xfered = 0;
1906
1907 while (xfered < len)
1908 {
1909 LONGEST xfer = target_read_partial (ops, object, annex,
1910 (gdb_byte *) buf + xfered,
1911 offset + xfered, len - xfered);
1912
1913 /* Call an observer, notifying them of the xfer progress? */
1914 if (xfer == 0)
1915 return xfered;
1916 if (xfer < 0)
1917 return -1;
1918 xfered += xfer;
1919 QUIT;
1920 }
1921 return len;
1922 }
1923
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that are neither at the beginning nor the end -- nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1) == 1)
    {
      /* The first byte is readable: bisect toward the failure from
	 the front.  */
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1) == 1)
    {
      /* The last byte is readable: bisect from the back instead.  */
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is the half adjacent to the known-readable end.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  Note that
	 BUF may be larger than the data actually read; RESULT takes
	 ownership of the whole allocation.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  */
      LONGEST rlen = end - current_end;

      /* Copy the tail into a right-sized buffer so RESULT's data
	 starts at its begin address.  */
      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2056
2057 void
2058 free_memory_read_result_vector (void *x)
2059 {
2060 VEC(memory_read_result_s) *v = x;
2061 memory_read_result_s *current;
2062 int ix;
2063
2064 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2065 {
2066 xfree (current->data);
2067 }
2068 VEC_free (memory_read_result_s, v);
2069 }
2070
2071 VEC(memory_read_result_s) *
2072 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2073 {
2074 VEC(memory_read_result_s) *result = 0;
2075
2076 LONGEST xfered = 0;
2077 while (xfered < len)
2078 {
2079 struct mem_region *region = lookup_mem_region (offset + xfered);
2080 LONGEST rlen;
2081
2082 /* If there is no explicit region, a fake one should be created. */
2083 gdb_assert (region);
2084
2085 if (region->hi == 0)
2086 rlen = len - xfered;
2087 else
2088 rlen = region->hi - offset;
2089
2090 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2091 {
2092 /* Cannot read this region. Note that we can end up here only
2093 if the region is explicitly marked inaccessible, or
2094 'inaccessible-by-default' is in effect. */
2095 xfered += rlen;
2096 }
2097 else
2098 {
2099 LONGEST to_read = min (len - xfered, rlen);
2100 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2101
2102 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2103 (gdb_byte *) buffer,
2104 offset + xfered, to_read);
2105 /* Call an observer, notifying them of the xfer progress? */
2106 if (xfer <= 0)
2107 {
2108 /* Got an error reading full chunk. See if maybe we can read
2109 some subrange. */
2110 xfree (buffer);
2111 read_whatever_is_readable (ops, offset + xfered,
2112 offset + xfered + to_read, &result);
2113 xfered += to_read;
2114 }
2115 else
2116 {
2117 struct memory_read_result r;
2118 r.data = buffer;
2119 r.begin = offset + xfered;
2120 r.end = r.begin + xfer;
2121 VEC_safe_push (memory_read_result_s, result, &r);
2122 xfered += xfer;
2123 }
2124 QUIT;
2125 }
2126 }
2127 return result;
2128 }
2129
2130
2131 /* An alternative to target_write with progress callbacks. */
2132
2133 LONGEST
2134 target_write_with_progress (struct target_ops *ops,
2135 enum target_object object,
2136 const char *annex, const gdb_byte *buf,
2137 ULONGEST offset, LONGEST len,
2138 void (*progress) (ULONGEST, void *), void *baton)
2139 {
2140 LONGEST xfered = 0;
2141
2142 /* Give the progress callback a chance to set up. */
2143 if (progress)
2144 (*progress) (0, baton);
2145
2146 while (xfered < len)
2147 {
2148 LONGEST xfer = target_write_partial (ops, object, annex,
2149 (gdb_byte *) buf + xfered,
2150 offset + xfered, len - xfered);
2151
2152 if (xfer == 0)
2153 return xfered;
2154 if (xfer < 0)
2155 return -1;
2156
2157 if (progress)
2158 (*progress) (xfer, baton);
2159
2160 xfered += xfer;
2161 QUIT;
2162 }
2163 return len;
2164 }
2165
2166 /* For docs on target_write see target.h. */
2167
2168 LONGEST
2169 target_write (struct target_ops *ops,
2170 enum target_object object,
2171 const char *annex, const gdb_byte *buf,
2172 ULONGEST offset, LONGEST len)
2173 {
2174 return target_write_with_progress (ops, object, annex, buf, offset, len,
2175 NULL, NULL);
2176 }
2177
2178 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2179 the size of the transferred data. PADDING additional bytes are
2180 available in *BUF_P. This is a helper function for
2181 target_read_alloc; see the declaration of that function for more
2182 information. */
2183
2184 static LONGEST
2185 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2186 const char *annex, gdb_byte **buf_p, int padding)
2187 {
2188 size_t buf_alloc, buf_pos;
2189 gdb_byte *buf;
2190 LONGEST n;
2191
2192 /* This function does not have a length parameter; it reads the
2193 entire OBJECT). Also, it doesn't support objects fetched partly
2194 from one target and partly from another (in a different stratum,
2195 e.g. a core file and an executable). Both reasons make it
2196 unsuitable for reading memory. */
2197 gdb_assert (object != TARGET_OBJECT_MEMORY);
2198
2199 /* Start by reading up to 4K at a time. The target will throttle
2200 this number down if necessary. */
2201 buf_alloc = 4096;
2202 buf = xmalloc (buf_alloc);
2203 buf_pos = 0;
2204 while (1)
2205 {
2206 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2207 buf_pos, buf_alloc - buf_pos - padding);
2208 if (n < 0)
2209 {
2210 /* An error occurred. */
2211 xfree (buf);
2212 return -1;
2213 }
2214 else if (n == 0)
2215 {
2216 /* Read all there was. */
2217 if (buf_pos == 0)
2218 xfree (buf);
2219 else
2220 *buf_p = buf;
2221 return buf_pos;
2222 }
2223
2224 buf_pos += n;
2225
2226 /* If the buffer is filling up, expand it. */
2227 if (buf_alloc < buf_pos * 2)
2228 {
2229 buf_alloc *= 2;
2230 buf = xrealloc (buf, buf_alloc);
2231 }
2232
2233 QUIT;
2234 }
2235 }
2236
2237 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2238 the size of the transferred data. See the declaration in "target.h"
2239 function for more information about the return value. */
2240
2241 LONGEST
2242 target_read_alloc (struct target_ops *ops, enum target_object object,
2243 const char *annex, gdb_byte **buf_p)
2244 {
2245 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2246 }
2247
2248 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2249 returned as a string, allocated using xmalloc. If an error occurs
2250 or the transfer is unsupported, NULL is returned. Empty objects
2251 are returned as allocated but empty strings. A warning is issued
2252 if the result contains any embedded NUL bytes. */
2253
2254 char *
2255 target_read_stralloc (struct target_ops *ops, enum target_object object,
2256 const char *annex)
2257 {
2258 gdb_byte *buffer;
2259 LONGEST transferred;
2260
2261 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2262
2263 if (transferred < 0)
2264 return NULL;
2265
2266 if (transferred == 0)
2267 return xstrdup ("");
2268
2269 buffer[transferred] = 0;
2270 if (strlen (buffer) < transferred)
2271 warning (_("target object %d, annex %s, "
2272 "contained unexpected null characters"),
2273 (int) object, annex ? annex : "(none)");
2274
2275 return (char *) buffer;
2276 }
2277
2278 /* Memory transfer methods. */
2279
2280 void
2281 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2282 LONGEST len)
2283 {
2284 /* This method is used to read from an alternate, non-current
2285 target. This read must bypass the overlay support (as symbols
2286 don't match this target), and GDB's internal cache (wrong cache
2287 for this target). */
2288 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2289 != len)
2290 memory_error (EIO, addr);
2291 }
2292
2293 ULONGEST
2294 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2295 int len, enum bfd_endian byte_order)
2296 {
2297 gdb_byte buf[sizeof (ULONGEST)];
2298
2299 gdb_assert (len <= sizeof (buf));
2300 get_target_memory (ops, addr, buf, len);
2301 return extract_unsigned_integer (buf, len, byte_order);
2302 }
2303
2304 int
2305 target_insert_breakpoint (struct gdbarch *gdbarch,
2306 struct bp_target_info *bp_tgt)
2307 {
2308 if (!may_insert_breakpoints)
2309 {
2310 warning (_("May not insert breakpoints"));
2311 return 1;
2312 }
2313
2314 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2315 }
2316
2317 int
2318 target_remove_breakpoint (struct gdbarch *gdbarch,
2319 struct bp_target_info *bp_tgt)
2320 {
2321 /* This is kind of a weird case to handle, but the permission might
2322 have been changed after breakpoints were inserted - in which case
2323 we should just take the user literally and assume that any
2324 breakpoints should be left in place. */
2325 if (!may_insert_breakpoints)
2326 {
2327 warning (_("May not remove breakpoints"));
2328 return 1;
2329 }
2330
2331 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2332 }
2333
2334 static void
2335 target_info (char *args, int from_tty)
2336 {
2337 struct target_ops *t;
2338 int has_all_mem = 0;
2339
2340 if (symfile_objfile != NULL)
2341 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2342
2343 for (t = target_stack; t != NULL; t = t->beneath)
2344 {
2345 if (!(*t->to_has_memory) (t))
2346 continue;
2347
2348 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2349 continue;
2350 if (has_all_mem)
2351 printf_unfiltered (_("\tWhile running this, "
2352 "GDB does not access memory from...\n"));
2353 printf_unfiltered ("%s:\n", t->to_longname);
2354 (t->to_files_info) (t);
2355 has_all_mem = (*t->to_has_all_memory) (t);
2356 }
2357 }
2358
2359 /* This function is called before any new inferior is created, e.g.
2360 by running a program, attaching, or connecting to a target.
2361 It cleans up any state from previous invocations which might
2362 change between runs. This is a subset of what target_preopen
2363 resets (things which might change between targets). */
2364
2365 void
2366 target_pre_inferior (int from_tty)
2367 {
2368 /* Clear out solib state. Otherwise the solib state of the previous
2369 inferior might have survived and is entirely wrong for the new
2370 target. This has been observed on GNU/Linux using glibc 2.3. How
2371 to reproduce:
2372
2373 bash$ ./foo&
2374 [1] 4711
2375 bash$ ./foo&
2376 [1] 4712
2377 bash$ gdb ./foo
2378 [...]
2379 (gdb) attach 4711
2380 (gdb) detach
2381 (gdb) attach 4712
2382 Cannot access memory at address 0xdeadbeef
2383 */
2384
2385 /* In some OSs, the shared library list is the same/global/shared
2386 across inferiors. If code is shared between processes, so are
2387 memory regions and features. */
2388 if (!gdbarch_has_global_solist (target_gdbarch))
2389 {
2390 no_shared_libraries (NULL, from_tty);
2391
2392 invalidate_target_mem_regions ();
2393
2394 target_clear_description ();
2395 }
2396 }
2397
2398 /* Callback for iterate_over_inferiors. Gets rid of the given
2399 inferior. */
2400
2401 static int
2402 dispose_inferior (struct inferior *inf, void *args)
2403 {
2404 struct thread_info *thread;
2405
2406 thread = any_thread_of_process (inf->pid);
2407 if (thread)
2408 {
2409 switch_to_thread (thread->ptid);
2410
2411 /* Core inferiors actually should be detached, not killed. */
2412 if (target_has_execution)
2413 target_kill ();
2414 else
2415 target_detach (NULL, 0);
2416 }
2417
2418 return 0;
2419 }
2420
2421 /* This is to be called by the open routine before it does
2422 anything. */
2423
2424 void
2425 target_preopen (int from_tty)
2426 {
2427 dont_repeat ();
2428
2429 if (have_inferiors ())
2430 {
2431 if (!from_tty
2432 || !have_live_inferiors ()
2433 || query (_("A program is being debugged already. Kill it? ")))
2434 iterate_over_inferiors (dispose_inferior, NULL);
2435 else
2436 error (_("Program not killed."));
2437 }
2438
2439 /* Calling target_kill may remove the target from the stack. But if
2440 it doesn't (which seems like a win for UDI), remove it now. */
2441 /* Leave the exec target, though. The user may be switching from a
2442 live process to a core of the same program. */
2443 pop_all_targets_above (file_stratum, 0);
2444
2445 target_pre_inferior (from_tty);
2446 }
2447
2448 /* Detach a target after doing deferred register stores. */
2449
2450 void
2451 target_detach (char *args, int from_tty)
2452 {
2453 struct target_ops* t;
2454
2455 if (gdbarch_has_global_breakpoints (target_gdbarch))
2456 /* Don't remove global breakpoints here. They're removed on
2457 disconnection from the target. */
2458 ;
2459 else
2460 /* If we're in breakpoints-always-inserted mode, have to remove
2461 them before detaching. */
2462 remove_breakpoints_pid (PIDGET (inferior_ptid));
2463
2464 prepare_for_detach ();
2465
2466 for (t = current_target.beneath; t != NULL; t = t->beneath)
2467 {
2468 if (t->to_detach != NULL)
2469 {
2470 t->to_detach (t, args, from_tty);
2471 if (targetdebug)
2472 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2473 args, from_tty);
2474 return;
2475 }
2476 }
2477
2478 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2479 }
2480
2481 void
2482 target_disconnect (char *args, int from_tty)
2483 {
2484 struct target_ops *t;
2485
2486 /* If we're in breakpoints-always-inserted mode or if breakpoints
2487 are global across processes, we have to remove them before
2488 disconnecting. */
2489 remove_breakpoints ();
2490
2491 for (t = current_target.beneath; t != NULL; t = t->beneath)
2492 if (t->to_disconnect != NULL)
2493 {
2494 if (targetdebug)
2495 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2496 args, from_tty);
2497 t->to_disconnect (t, args, from_tty);
2498 return;
2499 }
2500
2501 tcomplain ();
2502 }
2503
2504 ptid_t
2505 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2506 {
2507 struct target_ops *t;
2508
2509 for (t = current_target.beneath; t != NULL; t = t->beneath)
2510 {
2511 if (t->to_wait != NULL)
2512 {
2513 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2514
2515 if (targetdebug)
2516 {
2517 char *status_string;
2518
2519 status_string = target_waitstatus_to_string (status);
2520 fprintf_unfiltered (gdb_stdlog,
2521 "target_wait (%d, status) = %d, %s\n",
2522 PIDGET (ptid), PIDGET (retval),
2523 status_string);
2524 xfree (status_string);
2525 }
2526
2527 return retval;
2528 }
2529 }
2530
2531 noprocess ();
2532 }
2533
2534 char *
2535 target_pid_to_str (ptid_t ptid)
2536 {
2537 struct target_ops *t;
2538
2539 for (t = current_target.beneath; t != NULL; t = t->beneath)
2540 {
2541 if (t->to_pid_to_str != NULL)
2542 return (*t->to_pid_to_str) (t, ptid);
2543 }
2544
2545 return normal_pid_to_str (ptid);
2546 }
2547
2548 char *
2549 target_thread_name (struct thread_info *info)
2550 {
2551 struct target_ops *t;
2552
2553 for (t = current_target.beneath; t != NULL; t = t->beneath)
2554 {
2555 if (t->to_thread_name != NULL)
2556 return (*t->to_thread_name) (info);
2557 }
2558
2559 return NULL;
2560 }
2561
2562 void
2563 target_resume (ptid_t ptid, int step, enum target_signal signal)
2564 {
2565 struct target_ops *t;
2566
2567 target_dcache_invalidate ();
2568
2569 for (t = current_target.beneath; t != NULL; t = t->beneath)
2570 {
2571 if (t->to_resume != NULL)
2572 {
2573 t->to_resume (t, ptid, step, signal);
2574 if (targetdebug)
2575 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2576 PIDGET (ptid),
2577 step ? "step" : "continue",
2578 target_signal_to_name (signal));
2579
2580 registers_changed_ptid (ptid);
2581 set_executing (ptid, 1);
2582 set_running (ptid, 1);
2583 clear_inline_frame_state (ptid);
2584 return;
2585 }
2586 }
2587
2588 noprocess ();
2589 }
2590 /* Look through the list of possible targets for a target that can
2591 follow forks. */
2592
2593 int
2594 target_follow_fork (int follow_child)
2595 {
2596 struct target_ops *t;
2597
2598 for (t = current_target.beneath; t != NULL; t = t->beneath)
2599 {
2600 if (t->to_follow_fork != NULL)
2601 {
2602 int retval = t->to_follow_fork (t, follow_child);
2603
2604 if (targetdebug)
2605 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2606 follow_child, retval);
2607 return retval;
2608 }
2609 }
2610
2611 /* Some target returned a fork event, but did not know how to follow it. */
2612 internal_error (__FILE__, __LINE__,
2613 _("could not find a target to follow fork"));
2614 }
2615
2616 void
2617 target_mourn_inferior (void)
2618 {
2619 struct target_ops *t;
2620
2621 for (t = current_target.beneath; t != NULL; t = t->beneath)
2622 {
2623 if (t->to_mourn_inferior != NULL)
2624 {
2625 t->to_mourn_inferior (t);
2626 if (targetdebug)
2627 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2628
2629 /* We no longer need to keep handles on any of the object files.
2630 Make sure to release them to avoid unnecessarily locking any
2631 of them while we're not actually debugging. */
2632 bfd_cache_close_all ();
2633
2634 return;
2635 }
2636 }
2637
2638 internal_error (__FILE__, __LINE__,
2639 _("could not find a target to follow mourn inferior"));
2640 }
2641
2642 /* Look for a target which can describe architectural features, starting
2643 from TARGET. If we find one, return its description. */
2644
2645 const struct target_desc *
2646 target_read_description (struct target_ops *target)
2647 {
2648 struct target_ops *t;
2649
2650 for (t = target; t != NULL; t = t->beneath)
2651 if (t->to_read_description != NULL)
2652 {
2653 const struct target_desc *tdesc;
2654
2655 tdesc = t->to_read_description (t);
2656 if (tdesc)
2657 return tdesc;
2658 }
2659
2660 return NULL;
2661 }
2662
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search in on the
   target side with, for example, gdbserver).

   Searches [START_ADDR, START_ADDR + SEARCH_SPACE_LEN) for the
   PATTERN_LEN-byte PATTERN.  Returns 1 and sets *FOUND_ADDRP on a
   match, 0 if not found, -1 on a memory read error.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* The extra pattern_len - 1 bytes let a match straddle two chunks:
     each chunk's tail is re-scanned together with the next chunk.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access target memory at %s, halting search."),
	       hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      /* Only the portion of the buffer backed by remaining search
	 space may be scanned.  */
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  /* Translate the buffer offset back into a target address.  */
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  /* Next read starts right after the bytes already in the
	     buffer (the scanned chunk plus the kept tail).  */
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  /* KEEP_LEN bytes of the remaining space are already
	     buffered; read at most one more chunk.  */
	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access target "
			 "memory at %s, halting search."),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2768
2769 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2770 sequence of bytes in PATTERN with length PATTERN_LEN.
2771
2772 The result is 1 if found, 0 if not found, and -1 if there was an error
2773 requiring halting of the search (e.g. memory read error).
2774 If the pattern is found the address is recorded in FOUND_ADDRP. */
2775
2776 int
2777 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2778 const gdb_byte *pattern, ULONGEST pattern_len,
2779 CORE_ADDR *found_addrp)
2780 {
2781 struct target_ops *t;
2782 int found;
2783
2784 /* We don't use INHERIT to set current_target.to_search_memory,
2785 so we have to scan the target stack and handle targetdebug
2786 ourselves. */
2787
2788 if (targetdebug)
2789 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2790 hex_string (start_addr));
2791
2792 for (t = current_target.beneath; t != NULL; t = t->beneath)
2793 if (t->to_search_memory != NULL)
2794 break;
2795
2796 if (t != NULL)
2797 {
2798 found = t->to_search_memory (t, start_addr, search_space_len,
2799 pattern, pattern_len, found_addrp);
2800 }
2801 else
2802 {
2803 /* If a special version of to_search_memory isn't available, use the
2804 simple version. */
2805 found = simple_search_memory (current_target.beneath,
2806 start_addr, search_space_len,
2807 pattern, pattern_len, found_addrp);
2808 }
2809
2810 if (targetdebug)
2811 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2812
2813 return found;
2814 }
2815
2816 /* Look through the currently pushed targets. If none of them will
2817 be able to restart the currently running process, issue an error
2818 message. */
2819
2820 void
2821 target_require_runnable (void)
2822 {
2823 struct target_ops *t;
2824
2825 for (t = target_stack; t != NULL; t = t->beneath)
2826 {
2827 /* If this target knows how to create a new program, then
2828 assume we will still be able to after killing the current
2829 one. Either killing and mourning will not pop T, or else
2830 find_default_run_target will find it again. */
2831 if (t->to_create_inferior != NULL)
2832 return;
2833
2834 /* Do not worry about thread_stratum targets that can not
2835 create inferiors. Assume they will be pushed again if
2836 necessary, and continue to the process_stratum. */
2837 if (t->to_stratum == thread_stratum
2838 || t->to_stratum == arch_stratum)
2839 continue;
2840
2841 error (_("The \"%s\" target does not support \"run\". "
2842 "Try \"help target\" or \"continue\"."),
2843 t->to_shortname);
2844 }
2845
2846 /* This function is only called if the target is running. In that
2847 case there should have been a process_stratum target and it
2848 should either know how to create inferiors, or not... */
2849 internal_error (__FILE__, __LINE__, _("No targets found"));
2850 }
2851
2852 /* Look through the list of possible targets for a target that can
2853 execute a run or attach command without any other data. This is
2854 used to locate the default process stratum.
2855
2856 If DO_MESG is not NULL, the result is always valid (error() is
2857 called for errors); else, return NULL on error. */
2858
2859 static struct target_ops *
2860 find_default_run_target (char *do_mesg)
2861 {
2862 struct target_ops **t;
2863 struct target_ops *runable = NULL;
2864 int count;
2865
2866 count = 0;
2867
2868 for (t = target_structs; t < target_structs + target_struct_size;
2869 ++t)
2870 {
2871 if ((*t)->to_can_run && target_can_run (*t))
2872 {
2873 runable = *t;
2874 ++count;
2875 }
2876 }
2877
2878 if (count != 1)
2879 {
2880 if (do_mesg)
2881 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2882 else
2883 return NULL;
2884 }
2885
2886 return runable;
2887 }
2888
2889 void
2890 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2891 {
2892 struct target_ops *t;
2893
2894 t = find_default_run_target ("attach");
2895 (t->to_attach) (t, args, from_tty);
2896 return;
2897 }
2898
2899 void
2900 find_default_create_inferior (struct target_ops *ops,
2901 char *exec_file, char *allargs, char **env,
2902 int from_tty)
2903 {
2904 struct target_ops *t;
2905
2906 t = find_default_run_target ("run");
2907 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2908 return;
2909 }
2910
2911 static int
2912 find_default_can_async_p (void)
2913 {
2914 struct target_ops *t;
2915
2916 /* This may be called before the target is pushed on the stack;
2917 look for the default process stratum. If there's none, gdb isn't
2918 configured with a native debugger, and target remote isn't
2919 connected yet. */
2920 t = find_default_run_target (NULL);
2921 if (t && t->to_can_async_p)
2922 return (t->to_can_async_p) ();
2923 return 0;
2924 }
2925
2926 static int
2927 find_default_is_async_p (void)
2928 {
2929 struct target_ops *t;
2930
2931 /* This may be called before the target is pushed on the stack;
2932 look for the default process stratum. If there's none, gdb isn't
2933 configured with a native debugger, and target remote isn't
2934 connected yet. */
2935 t = find_default_run_target (NULL);
2936 if (t && t->to_is_async_p)
2937 return (t->to_is_async_p) ();
2938 return 0;
2939 }
2940
2941 static int
2942 find_default_supports_non_stop (void)
2943 {
2944 struct target_ops *t;
2945
2946 t = find_default_run_target (NULL);
2947 if (t && t->to_supports_non_stop)
2948 return (t->to_supports_non_stop) ();
2949 return 0;
2950 }
2951
2952 int
2953 target_supports_non_stop (void)
2954 {
2955 struct target_ops *t;
2956
2957 for (t = &current_target; t != NULL; t = t->beneath)
2958 if (t->to_supports_non_stop)
2959 return t->to_supports_non_stop ();
2960
2961 return 0;
2962 }
2963
2964
2965 char *
2966 target_get_osdata (const char *type)
2967 {
2968 struct target_ops *t;
2969
2970 /* If we're already connected to something that can get us OS
2971 related data, use it. Otherwise, try using the native
2972 target. */
2973 if (current_target.to_stratum >= process_stratum)
2974 t = current_target.beneath;
2975 else
2976 t = find_default_run_target ("get OS data");
2977
2978 if (!t)
2979 return NULL;
2980
2981 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2982 }
2983
2984 /* Determine the current address space of thread PTID. */
2985
2986 struct address_space *
2987 target_thread_address_space (ptid_t ptid)
2988 {
2989 struct address_space *aspace;
2990 struct inferior *inf;
2991 struct target_ops *t;
2992
2993 for (t = current_target.beneath; t != NULL; t = t->beneath)
2994 {
2995 if (t->to_thread_address_space != NULL)
2996 {
2997 aspace = t->to_thread_address_space (t, ptid);
2998 gdb_assert (aspace);
2999
3000 if (targetdebug)
3001 fprintf_unfiltered (gdb_stdlog,
3002 "target_thread_address_space (%s) = %d\n",
3003 target_pid_to_str (ptid),
3004 address_space_num (aspace));
3005 return aspace;
3006 }
3007 }
3008
3009 /* Fall-back to the "main" address space of the inferior. */
3010 inf = find_inferior_pid (ptid_get_pid (ptid));
3011
3012 if (inf == NULL || inf->aspace == NULL)
3013 internal_error (__FILE__, __LINE__,
3014 _("Can't determine the current "
3015 "address space of thread %s\n"),
3016 target_pid_to_str (ptid));
3017
3018 return inf->aspace;
3019 }
3020
3021 static int
3022 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3023 {
3024 return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
3025 }
3026
3027 static int
3028 default_watchpoint_addr_within_range (struct target_ops *target,
3029 CORE_ADDR addr,
3030 CORE_ADDR start, int length)
3031 {
3032 return addr >= start && addr < start + length;
3033 }
3034
3035 static struct gdbarch *
3036 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3037 {
3038 return target_gdbarch;
3039 }
3040
/* Stub usable wherever a target method should report 0.  */

static int
return_zero (void)
{
  return 0;
}
3046
/* Stub usable wherever a target method should report 1.  */

static int
return_one (void)
{
  return 1;
}
3052
/* Stub usable wherever a target method should report -1.  */

static int
return_minus_one (void)
{
  return -1;
}
3058
3059 /* Find a single runnable target in the stack and return it. If for
3060 some reason there is more than one, return NULL. */
3061
3062 struct target_ops *
3063 find_run_target (void)
3064 {
3065 struct target_ops **t;
3066 struct target_ops *runable = NULL;
3067 int count;
3068
3069 count = 0;
3070
3071 for (t = target_structs; t < target_structs + target_struct_size; ++t)
3072 {
3073 if ((*t)->to_can_run && target_can_run (*t))
3074 {
3075 runable = *t;
3076 ++count;
3077 }
3078 }
3079
3080 return (count == 1 ? runable : NULL);
3081 }
3082
3083 /*
3084 * Find the next target down the stack from the specified target.
3085 */
3086
3087 struct target_ops *
3088 find_target_beneath (struct target_ops *t)
3089 {
3090 return t->beneath;
3091 }
3092
3093 \f
3094 /* The inferior process has died. Long live the inferior! */
3095
3096 void
3097 generic_mourn_inferior (void)
3098 {
3099 ptid_t ptid;
3100
3101 ptid = inferior_ptid;
3102 inferior_ptid = null_ptid;
3103
3104 if (!ptid_equal (ptid, null_ptid))
3105 {
3106 int pid = ptid_get_pid (ptid);
3107 exit_inferior (pid);
3108 }
3109
3110 breakpoint_init_inferior (inf_exited);
3111 registers_changed ();
3112
3113 reopen_exec_file ();
3114 reinit_frame_cache ();
3115
3116 if (deprecated_detach_hook)
3117 deprecated_detach_hook ();
3118 }
3119 \f
3120 /* Helper function for child_wait and the derivatives of child_wait.
3121 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
3122 translation of that in OURSTATUS. */
3123 void
3124 store_waitstatus (struct target_waitstatus *ourstatus, int hoststatus)
3125 {
3126 if (WIFEXITED (hoststatus))
3127 {
3128 ourstatus->kind = TARGET_WAITKIND_EXITED;
3129 ourstatus->value.integer = WEXITSTATUS (hoststatus);
3130 }
3131 else if (!WIFSTOPPED (hoststatus))
3132 {
3133 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3134 ourstatus->value.sig = target_signal_from_host (WTERMSIG (hoststatus));
3135 }
3136 else
3137 {
3138 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3139 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (hoststatus));
3140 }
3141 }
3142 \f
3143 /* Convert a normal process ID to a string. Returns the string in a
3144 static buffer. */
3145
3146 char *
3147 normal_pid_to_str (ptid_t ptid)
3148 {
3149 static char buf[32];
3150
3151 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3152 return buf;
3153 }
3154
3155 static char *
3156 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3157 {
3158 return normal_pid_to_str (ptid);
3159 }
3160
3161 /* Error-catcher for target_find_memory_regions. */
3162 static int
3163 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3164 {
3165 error (_("Command not implemented for this target."));
3166 return 0;
3167 }
3168
3169 /* Error-catcher for target_make_corefile_notes. */
3170 static char *
3171 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3172 {
3173 error (_("Command not implemented for this target."));
3174 return NULL;
3175 }
3176
3177 /* Error-catcher for target_get_bookmark. */
3178 static gdb_byte *
3179 dummy_get_bookmark (char *ignore1, int ignore2)
3180 {
3181 tcomplain ();
3182 return NULL;
3183 }
3184
3185 /* Error-catcher for target_goto_bookmark. */
3186 static void
3187 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3188 {
3189 tcomplain ();
3190 }
3191
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of the target stack;
   its methods either delegate to the default run target, report
   "nothing here", or raise "not implemented" errors.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* Run/attach requests fall through to the default run target.  */
  dummy_target.to_attach = find_default_attach;
  /* Detaching from nothing is a no-op.  */
  dummy_target.to_detach =
    (void (*)(struct target_ops *, char *, int))target_ignore;
  dummy_target.to_create_inferior = find_default_create_inferior;
  /* Async capability queries are forwarded to the default run target
     as well, since no real target is pushed yet.  */
  dummy_target.to_can_async_p = find_default_can_async_p;
  dummy_target.to_is_async_p = find_default_is_async_p;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  /* These operations raise errors when invoked on the dummy target.  */
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  dummy_target.to_xfer_partial = default_xfer_partial;
  /* The dummy target has no memory, stack, registers, or execution;
     the casts adapt the generic return_zero stub to each slot's
     signature.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_stopped_by_watchpoint = return_zero;
  dummy_target.to_stopped_data_address =
    (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;
}
3226 \f
3227 static void
3228 debug_to_open (char *args, int from_tty)
3229 {
3230 debug_target.to_open (args, from_tty);
3231
3232 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3233 }
3234
3235 void
3236 target_close (struct target_ops *targ, int quitting)
3237 {
3238 if (targ->to_xclose != NULL)
3239 targ->to_xclose (targ, quitting);
3240 else if (targ->to_close != NULL)
3241 targ->to_close (quitting);
3242
3243 if (targetdebug)
3244 fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
3245 }
3246
3247 void
3248 target_attach (char *args, int from_tty)
3249 {
3250 struct target_ops *t;
3251
3252 for (t = current_target.beneath; t != NULL; t = t->beneath)
3253 {
3254 if (t->to_attach != NULL)
3255 {
3256 t->to_attach (t, args, from_tty);
3257 if (targetdebug)
3258 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3259 args, from_tty);
3260 return;
3261 }
3262 }
3263
3264 internal_error (__FILE__, __LINE__,
3265 _("could not find a target to attach"));
3266 }
3267
3268 int
3269 target_thread_alive (ptid_t ptid)
3270 {
3271 struct target_ops *t;
3272
3273 for (t = current_target.beneath; t != NULL; t = t->beneath)
3274 {
3275 if (t->to_thread_alive != NULL)
3276 {
3277 int retval;
3278
3279 retval = t->to_thread_alive (t, ptid);
3280 if (targetdebug)
3281 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3282 PIDGET (ptid), retval);
3283
3284 return retval;
3285 }
3286 }
3287
3288 return 0;
3289 }
3290
3291 void
3292 target_find_new_threads (void)
3293 {
3294 struct target_ops *t;
3295
3296 for (t = current_target.beneath; t != NULL; t = t->beneath)
3297 {
3298 if (t->to_find_new_threads != NULL)
3299 {
3300 t->to_find_new_threads (t);
3301 if (targetdebug)
3302 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3303
3304 return;
3305 }
3306 }
3307 }
3308
/* Stop (interrupt) thread PTID via the current target.  Honors the
   user-settable "may-interrupt" permission: when stopping is not
   permitted, warn and do nothing.  */

void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (ptid);
}
3320
/* Debug wrapper for the to_post_attach target method: delegate to the
   saved target, then log the call.  */

static void
debug_to_post_attach (int pid)
{
  debug_target.to_post_attach (pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3328
/* Return a pretty printed form of target_waitstatus WS, e.g.
   "status->kind = stopped, signal = SIGINT".
   Space for the result is malloc'd (via xstrprintf); the caller must
   free it.  */

char *
target_waitstatus_to_string (const struct target_waitstatus *ws)
{
  /* Common prefix shared by every returned string.  */
  const char *kind_str = "status->kind = ";

  switch (ws->kind)
    {
    case TARGET_WAITKIND_EXITED:
      return xstrprintf ("%sexited, status = %d",
			 kind_str, ws->value.integer);
    case TARGET_WAITKIND_STOPPED:
      return xstrprintf ("%sstopped, signal = %s",
			 kind_str, target_signal_to_name (ws->value.sig));
    case TARGET_WAITKIND_SIGNALLED:
      return xstrprintf ("%ssignalled, signal = %s",
			 kind_str, target_signal_to_name (ws->value.sig));
    case TARGET_WAITKIND_LOADED:
      return xstrprintf ("%sloaded", kind_str);
    case TARGET_WAITKIND_FORKED:
      return xstrprintf ("%sforked", kind_str);
    case TARGET_WAITKIND_VFORKED:
      return xstrprintf ("%svforked", kind_str);
    case TARGET_WAITKIND_EXECD:
      return xstrprintf ("%sexecd", kind_str);
    case TARGET_WAITKIND_SYSCALL_ENTRY:
      return xstrprintf ("%sentered syscall", kind_str);
    case TARGET_WAITKIND_SYSCALL_RETURN:
      return xstrprintf ("%sexited syscall", kind_str);
    case TARGET_WAITKIND_SPURIOUS:
      return xstrprintf ("%sspurious", kind_str);
    case TARGET_WAITKIND_IGNORE:
      return xstrprintf ("%signore", kind_str);
    case TARGET_WAITKIND_NO_HISTORY:
      return xstrprintf ("%sno-history", kind_str);
    default:
      /* Unknown enumerator; still produce a well-formed string.  */
      return xstrprintf ("%sunknown???", kind_str);
    }
}
3370
/* Log one register access to gdb_stdlog on behalf of FUNC (the name of
   the calling wrapper).  Prints the register's name (or number when it
   has no name or is out of range), then its raw bytes from REGCACHE,
   and -- when it fits in a LONGEST -- its value as address and decimal.
   REGNO may be -1 (meaning "all registers"); in that case only the
   number is printed.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      unsigned char buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      /* Raw bytes, in target memory order.  */
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3407
3408 void
3409 target_fetch_registers (struct regcache *regcache, int regno)
3410 {
3411 struct target_ops *t;
3412
3413 for (t = current_target.beneath; t != NULL; t = t->beneath)
3414 {
3415 if (t->to_fetch_registers != NULL)
3416 {
3417 t->to_fetch_registers (t, regcache, regno);
3418 if (targetdebug)
3419 debug_print_register ("target_fetch_registers", regcache, regno);
3420 return;
3421 }
3422 }
3423 }
3424
3425 void
3426 target_store_registers (struct regcache *regcache, int regno)
3427 {
3428 struct target_ops *t;
3429
3430 if (!may_write_registers)
3431 error (_("Writing to registers is not allowed (regno %d)"), regno);
3432
3433 for (t = current_target.beneath; t != NULL; t = t->beneath)
3434 {
3435 if (t->to_store_registers != NULL)
3436 {
3437 t->to_store_registers (t, regcache, regno);
3438 if (targetdebug)
3439 {
3440 debug_print_register ("target_store_registers", regcache, regno);
3441 }
3442 return;
3443 }
3444 }
3445
3446 noprocess ();
3447 }
3448
3449 int
3450 target_core_of_thread (ptid_t ptid)
3451 {
3452 struct target_ops *t;
3453
3454 for (t = current_target.beneath; t != NULL; t = t->beneath)
3455 {
3456 if (t->to_core_of_thread != NULL)
3457 {
3458 int retval = t->to_core_of_thread (t, ptid);
3459
3460 if (targetdebug)
3461 fprintf_unfiltered (gdb_stdlog,
3462 "target_core_of_thread (%d) = %d\n",
3463 PIDGET (ptid), retval);
3464 return retval;
3465 }
3466 }
3467
3468 return -1;
3469 }
3470
3471 int
3472 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3473 {
3474 struct target_ops *t;
3475
3476 for (t = current_target.beneath; t != NULL; t = t->beneath)
3477 {
3478 if (t->to_verify_memory != NULL)
3479 {
3480 int retval = t->to_verify_memory (t, data, memaddr, size);
3481
3482 if (targetdebug)
3483 fprintf_unfiltered (gdb_stdlog,
3484 "target_verify_memory (%s, %s) = %d\n",
3485 paddress (target_gdbarch, memaddr),
3486 pulongest (size),
3487 retval);
3488 return retval;
3489 }
3490 }
3491
3492 tcomplain ();
3493 }
3494
3495 /* The documentation for this function is in its prototype declaration
3496 in target.h. */
3497
3498 int
3499 target_ranged_break_num_registers (void)
3500 {
3501 struct target_ops *t;
3502
3503 for (t = current_target.beneath; t != NULL; t = t->beneath)
3504 if (t->to_ranged_break_num_registers != NULL)
3505 return t->to_ranged_break_num_registers (t);
3506
3507 return -1;
3508 }
3509
/* Debug wrapper for the to_prepare_to_store target method: delegate to
   the saved target, then log the call.  */

static void
debug_to_prepare_to_store (struct regcache *regcache)
{
  debug_target.to_prepare_to_store (regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
3517
/* Debug wrapper for the deprecated_xfer_memory target method.
   Delegates the transfer, then logs the call and -- when bytes were
   actually transferred -- a hex dump of the buffer.  The dump breaks
   lines at 16-byte-aligned host addresses and, unless targetdebug >= 2,
   truncates after the first line with " ...".  Returns the number of
   bytes transferred, as reported by the underlying method.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  /* No trailing newline here: the hex dump (and a final '\n') follow.  */
  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch, memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Start a new output line at each 16-byte-aligned *host*
	     address within the buffer.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
3558
/* Debug wrapper for the to_files_info target method: delegate to the
   saved target, then log the call.  */

static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
3566
3567 static int
3568 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
3569 struct bp_target_info *bp_tgt)
3570 {
3571 int retval;
3572
3573 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
3574
3575 fprintf_unfiltered (gdb_stdlog,
3576 "target_insert_breakpoint (%s, xxx) = %ld\n",
3577 core_addr_to_string (bp_tgt->placed_address),
3578 (unsigned long) retval);
3579 return retval;
3580 }
3581
3582 static int
3583 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
3584 struct bp_target_info *bp_tgt)
3585 {
3586 int retval;
3587
3588 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
3589
3590 fprintf_unfiltered (gdb_stdlog,
3591 "target_remove_breakpoint (%s, xxx) = %ld\n",
3592 core_addr_to_string (bp_tgt->placed_address),
3593 (unsigned long) retval);
3594 return retval;
3595 }
3596
/* Debug wrapper for the to_can_use_hw_breakpoint target method:
   delegate to the saved target, then log the arguments and result.  */

static int
debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
{
  int retval;

  retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
		      (unsigned long) type,
		      (unsigned long) cnt,
		      (unsigned long) from_tty,
		      (unsigned long) retval);
  return retval;
}
3612
3613 static int
3614 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3615 {
3616 CORE_ADDR retval;
3617
3618 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
3619
3620 fprintf_unfiltered (gdb_stdlog,
3621 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
3622 core_addr_to_string (addr), (unsigned long) len,
3623 core_addr_to_string (retval));
3624 return retval;
3625 }
3626
/* Debug wrapper for the to_can_accel_watchpoint_condition target
   method: delegate to the saved target, then log the arguments
   (including the host address of the condition expression) and the
   result.  */

static int
debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
					 struct expression *cond)
{
  int retval;

  retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
							   rw, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_accel_watchpoint_condition "
		      "(%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, rw,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
3643
3644 static int
3645 debug_to_stopped_by_watchpoint (void)
3646 {
3647 int retval;
3648
3649 retval = debug_target.to_stopped_by_watchpoint ();
3650
3651 fprintf_unfiltered (gdb_stdlog,
3652 "target_stopped_by_watchpoint () = %ld\n",
3653 (unsigned long) retval);
3654 return retval;
3655 }
3656
/* Debug wrapper for the to_stopped_data_address target method:
   delegate to the saved target, then log the address written to *ADDR
   and the result.

   NOTE(review): *ADDR is printed even when the method returned 0, in
   which case the method may not have stored anything there -- the log
   would then show an indeterminate value.  Confirm against the
   to_stopped_data_address contract in target.h.  */

static int
debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
{
  int retval;

  retval = debug_target.to_stopped_data_address (target, addr);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_data_address ([%s]) = %ld\n",
		      core_addr_to_string (*addr),
		      (unsigned long)retval);
  return retval;
}
3670
3671 static int
3672 debug_to_watchpoint_addr_within_range (struct target_ops *target,
3673 CORE_ADDR addr,
3674 CORE_ADDR start, int length)
3675 {
3676 int retval;
3677
3678 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
3679 start, length);
3680
3681 fprintf_filtered (gdb_stdlog,
3682 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
3683 core_addr_to_string (addr), core_addr_to_string (start),
3684 length, retval);
3685 return retval;
3686 }
3687
/* Debug wrapper for the to_insert_hw_breakpoint target method:
   delegate to the saved target, then log the placed address and
   result.  */

static int
debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
3702
/* Debug wrapper for the to_remove_hw_breakpoint target method:
   delegate to the saved target, then log the placed address and
   result.  */

static int
debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
3717
/* Debug wrapper for the to_insert_watchpoint target method: delegate
   to the saved target, then log the arguments (address, length, watch
   type, condition expression's host address) and the result.  */

static int
debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_insert_watchpoint (addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
3732
/* Debug wrapper for the to_remove_watchpoint target method: delegate
   to the saved target, then log the arguments (address, length, watch
   type, condition expression's host address) and the result.  */

static int
debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_remove_watchpoint (addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
3747
/* Debug wrapper for the to_terminal_init target method: delegate to
   the saved target, then log the call.  */

static void
debug_to_terminal_init (void)
{
  debug_target.to_terminal_init ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
3755
/* Debug wrapper for the to_terminal_inferior target method: delegate
   to the saved target, then log the call.  */

static void
debug_to_terminal_inferior (void)
{
  debug_target.to_terminal_inferior ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
3763
/* Debug wrapper for the to_terminal_ours_for_output target method:
   delegate to the saved target, then log the call.  */

static void
debug_to_terminal_ours_for_output (void)
{
  debug_target.to_terminal_ours_for_output ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
3771
/* Debug wrapper for the to_terminal_ours target method: delegate to
   the saved target, then log the call.  */

static void
debug_to_terminal_ours (void)
{
  debug_target.to_terminal_ours ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
3779
/* Debug wrapper for the to_terminal_save_ours target method: delegate
   to the saved target, then log the call.  */

static void
debug_to_terminal_save_ours (void)
{
  debug_target.to_terminal_save_ours ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
3787
/* Debug wrapper for the to_terminal_info target method: delegate to
   the saved target, then log the arguments.  */

static void
debug_to_terminal_info (char *arg, int from_tty)
{
  debug_target.to_terminal_info (arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
3796
/* Debug wrapper for the to_load target method: delegate to the saved
   target, then log the arguments.  */

static void
debug_to_load (char *args, int from_tty)
{
  debug_target.to_load (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
3804
/* Debug wrapper for the to_post_startup_inferior target method:
   delegate to the saved target, then log the pid of PTID.  */

static void
debug_to_post_startup_inferior (ptid_t ptid)
{
  debug_target.to_post_startup_inferior (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      PIDGET (ptid));
}
3813
3814 static int
3815 debug_to_insert_fork_catchpoint (int pid)
3816 {
3817 int retval;
3818
3819 retval = debug_target.to_insert_fork_catchpoint (pid);
3820
3821 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
3822 pid, retval);
3823
3824 return retval;
3825 }
3826
3827 static int
3828 debug_to_remove_fork_catchpoint (int pid)
3829 {
3830 int retval;
3831
3832 retval = debug_target.to_remove_fork_catchpoint (pid);
3833
3834 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
3835 pid, retval);
3836
3837 return retval;
3838 }
3839
3840 static int
3841 debug_to_insert_vfork_catchpoint (int pid)
3842 {
3843 int retval;
3844
3845 retval = debug_target.to_insert_vfork_catchpoint (pid);
3846
3847 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
3848 pid, retval);
3849
3850 return retval;
3851 }
3852
3853 static int
3854 debug_to_remove_vfork_catchpoint (int pid)
3855 {
3856 int retval;
3857
3858 retval = debug_target.to_remove_vfork_catchpoint (pid);
3859
3860 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
3861 pid, retval);
3862
3863 return retval;
3864 }
3865
3866 static int
3867 debug_to_insert_exec_catchpoint (int pid)
3868 {
3869 int retval;
3870
3871 retval = debug_target.to_insert_exec_catchpoint (pid);
3872
3873 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
3874 pid, retval);
3875
3876 return retval;
3877 }
3878
3879 static int
3880 debug_to_remove_exec_catchpoint (int pid)
3881 {
3882 int retval;
3883
3884 retval = debug_target.to_remove_exec_catchpoint (pid);
3885
3886 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
3887 pid, retval);
3888
3889 return retval;
3890 }
3891
3892 static int
3893 debug_to_has_exited (int pid, int wait_status, int *exit_status)
3894 {
3895 int has_exited;
3896
3897 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
3898
3899 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
3900 pid, wait_status, *exit_status, has_exited);
3901
3902 return has_exited;
3903 }
3904
3905 static int
3906 debug_to_can_run (void)
3907 {
3908 int retval;
3909
3910 retval = debug_target.to_can_run ();
3911
3912 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
3913
3914 return retval;
3915 }
3916
/* Debug wrapper for the to_notice_signals target method: delegate to
   the saved target, then log the pid of PTID.  */

static void
debug_to_notice_signals (ptid_t ptid)
{
  debug_target.to_notice_signals (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_notice_signals (%d)\n",
		      PIDGET (ptid));
}
3925
/* Debug wrapper for the to_thread_architecture target method:
   delegate to the saved target, then log the ptid along with the host
   address and printable name of the returned gdbarch.  */

static struct gdbarch *
debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  struct gdbarch *retval;

  retval = debug_target.to_thread_architecture (ops, ptid);

  fprintf_unfiltered (gdb_stdlog,
		      "target_thread_architecture (%s) = %s [%s]\n",
		      target_pid_to_str (ptid),
		      host_address_to_string (retval),
		      gdbarch_bfd_arch_info (retval)->printable_name);
  return retval;
}
3940
/* Debug wrapper for the to_stop target method: delegate to the saved
   target, then log the ptid.  */

static void
debug_to_stop (ptid_t ptid)
{
  debug_target.to_stop (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
3949
/* Debug wrapper for the to_rcmd target method: delegate to the saved
   target (which writes its reply to OUTBUF), then log the command.  */

static void
debug_to_rcmd (char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
3957
3958 static char *
3959 debug_to_pid_to_exec_file (int pid)
3960 {
3961 char *exec_file;
3962
3963 exec_file = debug_target.to_pid_to_exec_file (pid);
3964
3965 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
3966 pid, exec_file);
3967
3968 return exec_file;
3969 }
3970
/* Enable target debugging: save a copy of the current target vector in
   DEBUG_TARGET, then overwrite selected methods of CURRENT_TARGET with
   the debug_to_* wrappers above, each of which delegates to the saved
   copy and logs the call to gdb_stdlog.  */

static void
setup_target_debug (void)
{
  /* Keep the real methods so the wrappers can forward to them.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_notice_signals = debug_to_notice_signals;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4019 \f
4020
/* Help text shared by the "info target" and "info files" commands.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4025
/* Implement the "monitor" command: forward CMD to the remote monitor
   via target_rcmd.  Errors out when to_rcmd is still the default
   tcomplain stub -- either directly, or hidden behind the debug_to_rcmd
   wrapper when target debugging is enabled.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  if ((current_target.to_rcmd
       == (void (*) (char *, struct ui_file *)) tcomplain)
      || (current_target.to_rcmd == debug_to_rcmd
	  && (debug_target.to_rcmd
	      == (void (*) (char *, struct ui_file *)) tcomplain)))
    error (_("\"monitor\" command not supported by this target."));
  target_rcmd (cmd, gdb_stdtarg);
}
4038
4039 /* Print the name of each layers of our target stack. */
4040
4041 static void
4042 maintenance_print_target_stack (char *cmd, int from_tty)
4043 {
4044 struct target_ops *t;
4045
4046 printf_filtered (_("The current target stack is:\n"));
4047
4048 for (t = target_stack; t != NULL; t = t->beneath)
4049 {
4050 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4051 }
4052 }
4053
/* Controls if async mode is permitted.  This is the value actually
   consulted by the rest of GDB.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, linux_nat_async_permitted is *not* updated; the setter
   below copies this into target_async_permitted only when safe.  */
static int target_async_permitted_1 = 0;
4060
/* Setter callback for "set target-async".  The command machinery has
   already written the new value into target_async_permitted_1; reject
   the change while an inferior is live by restoring the shadow
   variable and calling error (which does not return), otherwise commit
   the new value.  */

static void
set_maintenance_target_async_permitted (char *args, int from_tty,
					struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit before bailing out, so "show" stays
	 consistent with the effective setting.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4073
/* Show callback for "show target-async".  */

static void
show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
					 struct cmd_list_element *c,
					 const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4083
/* Temporary copies of permission settings.  The "set may-*" commands
   write here; the setter callbacks below copy these into the real
   may_* variables only when the change is allowed.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4092
/* Make the user-set values match the real values again, discarding any
   pending (not-yet-committed) user edits of the may-* settings.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4105
/* The one function handles (most of) the permission flags in the same
   way: refuse any change while the inferior is executing, otherwise
   commit the user-set shadow values into the real may_* variables.
   may_write_memory is deliberately not handled here -- it may change
   independently of observer mode via set_write_memory_permission
   below.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the pending user edits, then bail (error does not
	 return).  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4127
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this is allowed even while the inferior is
   running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4138
4139
/* Module initialization: push the dummy target as the bottom of the
   target stack, register the "info target"/"info files" commands, the
   target-related set/show commands and permission flags, and create
   the target-side data cache.  */

void
initialize_targets (void)
{
  /* The dummy target is always the bottom of the stack.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases of each other.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			    NULL,
			    show_targetdebug,
			    &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_maintenance_target_async_permitted,
			   show_maintenance_target_async_permitted,
			   &setlist,
			   &showlist);

  add_setshow_boolean_cmd ("stack-cache", class_support,
			   &stack_cache_enabled_p_1, _("\
Set cache use for stack access."), _("\
Show cache use for stack access."), _("\
When on, use the data cache for all stack access, regardless of any\n\
configured memory regions.  This improves remote performance significantly.\n\
By default, caching for stack access is on."),
			   set_stack_cache_enabled_p,
			   show_stack_cache_enabled_p,
			   &setlist, &showlist);

  /* The may-* permission flags all share set_target_permissions as
     their setter, except may-write-memory which may change while the
     inferior runs.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);


  target_dcache = dcache_init ();
}
This page took 0.118614 seconds and 4 git commands to generate.