Add target_ops argument to to_region_ok_for_hw_watchpoint
gdb/inf-ttrace.c (binutils-gdb.git)
1 /* Low-level child interface to ttrace.
2
3 Copyright (C) 2004-2014 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 /* The ttrace(2) system call didn't exist before HP-UX 10.30. Don't
23 try to compile this code unless we have it. */
24 #ifdef HAVE_TTRACE
25
26 #include "command.h"
27 #include "gdbcore.h"
28 #include "gdbthread.h"
29 #include "inferior.h"
30 #include "terminal.h"
31 #include "target.h"
32
33 #include "gdb_assert.h"
34 #include <string.h>
35 #include <sys/mman.h>
36 #include <sys/ttrace.h>
37 #include <signal.h>
38
39 #include "inf-child.h"
40 #include "inf-ttrace.h"
41 #include "common/filestuff.h"
42
43 \f
44
45 /* HP-UX uses a threading model where each user-space thread
46 corresponds to a kernel thread. These kernel threads are called
47 lwps. The ttrace(2) interface gives us almost full control over
48 the threads, which makes it very easy to support them in GDB. We
49 identify the threads by process ID and lwp ID. The ttrace(2) also
50 provides us with a thread's user ID (in the `tts_user_tid' member
51 of `ttstate_t') but we don't use that (yet) as it isn't necessary
52 to uniquely label the thread. */
53
54 /* Number of active lwps. */
55 static int inf_ttrace_num_lwps;
56 \f
57
58 /* On HP-UX versions that have the ttrace(2) system call, we can
59 implement "hardware" watchpoints by fiddling with the protection of
60 pages in the address space that contain the variable being watched.
61 In order to implement this, we keep a dictionary of pages for which
62 we have changed the protection. */
63
64 struct inf_ttrace_page
65 {
66 CORE_ADDR addr; /* Page address. */
67 int prot; /* Protection. */
68 int refcount; /* Reference count. */
69 struct inf_ttrace_page *next;
70 struct inf_ttrace_page *prev;
71 };
72
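/* The dictionary is a small, fixed-size hash table. Pages are hashed
by page address; each element of BUCKETS acts as the dummy head of a
doubly-linked chain of the pages that hash to that bucket. */
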
73 struct inf_ttrace_page_dict
74 {
75 struct inf_ttrace_page buckets[128];
76 int pagesize; /* Page size. */
77 int count; /* Number of pages in this dictionary. */
78 } inf_ttrace_page_dict;
79
80 struct inf_ttrace_private_thread_info
81 {
82 int dying;
83 };
84
85 /* Number of lwps that are currently in a system call. */
86 static int inf_ttrace_num_lwps_in_syscall;
87
88 /* Flag to indicate whether we should re-enable page protections after
89 the next wait. */
90 static int inf_ttrace_reenable_page_protections;
91
92 /* Enable system call events for process PID. */
93
94 static void
95 inf_ttrace_enable_syscall_events (pid_t pid)
96 {
97 ttevent_t tte;
98 ttstate_t tts;
99
100 gdb_assert (inf_ttrace_num_lwps_in_syscall == 0);
101
102 if (ttrace (TT_PROC_GET_EVENT_MASK, pid, 0,
103 (uintptr_t)&tte, sizeof tte, 0) == -1)
104 perror_with_name (("ttrace"));
105
106 tte.tte_events |= (TTEVT_SYSCALL_ENTRY | TTEVT_SYSCALL_RETURN);
107
108 if (ttrace (TT_PROC_SET_EVENT_MASK, pid, 0,
109 (uintptr_t)&tte, sizeof tte, 0) == -1)
110 perror_with_name (("ttrace"));
111
112 if (ttrace (TT_PROC_GET_FIRST_LWP_STATE, pid, 0,
113 (uintptr_t)&tts, sizeof tts, 0) == -1)
114 perror_with_name (("ttrace"));
115
116 if (tts.tts_flags & TTS_INSYSCALL)
117 inf_ttrace_num_lwps_in_syscall++;
118
119 /* FIXME: Handle multiple threads. */
120 }
121
122 /* Disable system call events for process PID. */
123
124 static void
125 inf_ttrace_disable_syscall_events (pid_t pid)
126 {
127 ttevent_t tte;
128
129 gdb_assert (inf_ttrace_page_dict.count == 0);
130
131 if (ttrace (TT_PROC_GET_EVENT_MASK, pid, 0,
132 (uintptr_t)&tte, sizeof tte, 0) == -1)
133 perror_with_name (("ttrace"));
134
135 tte.tte_events &= ~(TTEVT_SYSCALL_ENTRY | TTEVT_SYSCALL_RETURN);
136
137 if (ttrace (TT_PROC_SET_EVENT_MASK, pid, 0,
138 (uintptr_t)&tte, sizeof tte, 0) == -1)
139 perror_with_name (("ttrace"));
140
141 inf_ttrace_num_lwps_in_syscall = 0;
142 }
143
144 /* Get information about the page at address ADDR for process PID from
145 the dictionary. */
146
147 static struct inf_ttrace_page *
148 inf_ttrace_get_page (pid_t pid, CORE_ADDR addr)
149 {
150 const int num_buckets = ARRAY_SIZE (inf_ttrace_page_dict.buckets);
151 const int pagesize = inf_ttrace_page_dict.pagesize;
152 int bucket;
153 struct inf_ttrace_page *page;
154
155 bucket = (addr / pagesize) % num_buckets;
156 page = &inf_ttrace_page_dict.buckets[bucket];
157 while (page)
158 {
159 if (page->addr == addr)
160 break;
161
162 page = page->next;
163 }
164
165 return page;
166 }
167
168 /* Add the page at address ADDR for process PID to the dictionary. */
169
170 static struct inf_ttrace_page *
171 inf_ttrace_add_page (pid_t pid, CORE_ADDR addr)
172 {
173 const int num_buckets = ARRAY_SIZE (inf_ttrace_page_dict.buckets);
174 const int pagesize = inf_ttrace_page_dict.pagesize;
175 int bucket;
176 struct inf_ttrace_page *page;
177 struct inf_ttrace_page *prev = NULL;
178
179 bucket = (addr / pagesize) % num_buckets;
180 page = &inf_ttrace_page_dict.buckets[bucket];
181 while (page)
182 {
183 if (page->addr == addr)
184 break;
185
186 prev = page;
187 page = page->next;
188 }
189
190 if (!page)
191 {
192 int prot;
193
194 if (ttrace (TT_PROC_GET_MPROTECT, pid, 0,
195 addr, 0, (uintptr_t)&prot) == -1)
196 perror_with_name (("ttrace"));
197
198 page = XNEW (struct inf_ttrace_page);
199 page->addr = addr;
200 page->prot = prot;
201 page->refcount = 0;
202 page->next = NULL;
203
204 page->prev = prev;
205 prev->next = page;
206
207 inf_ttrace_page_dict.count++;
208 if (inf_ttrace_page_dict.count == 1)
209 inf_ttrace_enable_syscall_events (pid);
210
211 if (inf_ttrace_num_lwps_in_syscall == 0)
212 {
213 if (ttrace (TT_PROC_SET_MPROTECT, pid, 0,
214 addr, pagesize, prot & ~PROT_WRITE) == -1)
215 perror_with_name (("ttrace"));
216 }
217 }
218
219 return page;
220 }
221
222 /* Insert the page at address ADDR of process PID into the dictionary. */
223
224 static void
225 inf_ttrace_insert_page (pid_t pid, CORE_ADDR addr)
226 {
227 struct inf_ttrace_page *page;
228
229 page = inf_ttrace_get_page (pid, addr);
230 if (!page)
231 page = inf_ttrace_add_page (pid, addr);
232
233 page->refcount++;
234 }
235
236 /* Remove the page at address ADDR of process PID from the dictionary. */
237
238 static void
239 inf_ttrace_remove_page (pid_t pid, CORE_ADDR addr)
240 {
241 const int pagesize = inf_ttrace_page_dict.pagesize;
242 struct inf_ttrace_page *page;
243
244 page = inf_ttrace_get_page (pid, addr);
245 page->refcount--;
246
247 gdb_assert (page->refcount >= 0);
248
249 if (page->refcount == 0)
250 {
251 if (inf_ttrace_num_lwps_in_syscall == 0)
252 {
253 if (ttrace (TT_PROC_SET_MPROTECT, pid, 0,
254 addr, pagesize, page->prot) == -1)
255 perror_with_name (("ttrace"));
256 }
257
258 inf_ttrace_page_dict.count--;
259 if (inf_ttrace_page_dict.count == 0)
260 inf_ttrace_disable_syscall_events (pid);
261
262 page->prev->next = page->next;
263 if (page->next)
264 page->next->prev = page->prev;
265
266 xfree (page);
267 }
268 }
269
270 /* Mask the bits in PROT from the page protections that are currently
271 in the dictionary for process PID. */
272
273 static void
274 inf_ttrace_mask_page_protections (pid_t pid, int prot)
275 {
276 const int num_buckets = ARRAY_SIZE (inf_ttrace_page_dict.buckets);
277 const int pagesize = inf_ttrace_page_dict.pagesize;
278 int bucket;
279
280 for (bucket = 0; bucket < num_buckets; bucket++)
281 {
282 struct inf_ttrace_page *page;
283
284 page = inf_ttrace_page_dict.buckets[bucket].next;
285 while (page)
286 {
287 if (ttrace (TT_PROC_SET_MPROTECT, pid, 0,
288 page->addr, pagesize, page->prot & ~prot) == -1)
289 perror_with_name (("ttrace"));
290
291 page = page->next;
292 }
293 }
294 }
295
296 /* Write-protect the pages in the dictionary for process PID. */
297
298 static void
299 inf_ttrace_enable_page_protections (pid_t pid)
300 {
301 inf_ttrace_mask_page_protections (pid, PROT_WRITE);
302 }
303
304 /* Restore the protection of the pages in the dictionary for process
305 PID. */
306
307 static void
308 inf_ttrace_disable_page_protections (pid_t pid)
309 {
310 inf_ttrace_mask_page_protections (pid, 0);
311 }
312
313 /* Insert a "hardware" watchpoint for LEN bytes at address ADDR of
314 type TYPE. */
315
316 static int
317 inf_ttrace_insert_watchpoint (struct target_ops *self,
318 CORE_ADDR addr, int len, int type,
319 struct expression *cond)
320 {
321 const int pagesize = inf_ttrace_page_dict.pagesize;
322 pid_t pid = ptid_get_pid (inferior_ptid);
323 CORE_ADDR page_addr;
324 int num_pages;
325 int page;
326
327 gdb_assert (type == hw_write);
328
329 page_addr = (addr / pagesize) * pagesize;
330 num_pages = (len + pagesize - 1) / pagesize;
331
332 for (page = 0; page < num_pages; page++, page_addr += pagesize)
333 inf_ttrace_insert_page (pid, page_addr);
334
335 return 1;
336 }
337
338 /* Remove a "hardware" watchpoint for LEN bytes at address ADDR of
339 type TYPE. */
340
341 static int
342 inf_ttrace_remove_watchpoint (struct target_ops *self,
343 CORE_ADDR addr, int len, int type,
344 struct expression *cond)
345 {
346 const int pagesize = inf_ttrace_page_dict.pagesize;
347 pid_t pid = ptid_get_pid (inferior_ptid);
348 CORE_ADDR page_addr;
349 int num_pages;
350 int page;
351
352 gdb_assert (type == hw_write);
353
354 page_addr = (addr / pagesize) * pagesize;
355 num_pages = (len + pagesize - 1) / pagesize;
356
357 for (page = 0; page < num_pages; page++, page_addr += pagesize)
358 inf_ttrace_remove_page (pid, page_addr);
359
360 return 1;
361 }
362
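/* Implement the to_can_use_hw_breakpoint target_ops method. Only
hardware watchpoints are supported. */
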
363 static int
364 inf_ttrace_can_use_hw_breakpoint (struct target_ops *self,
365 int type, int len, int ot)
366 {
367 return (type == bp_hardware_watchpoint);
368 }
369
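/* Implement the to_region_ok_for_hw_watchpoint target_ops method. Any
region can be watched, since watchpoints are simulated by changing
page protections. */
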
370 static int
371 inf_ttrace_region_ok_for_hw_watchpoint (struct target_ops *self,
372 CORE_ADDR addr, int len)
373 {
374 return 1;
375 }
376
377 /* Return non-zero if the current inferior was (potentially) stopped
378 by hitting a "hardware" watchpoint. */
379
380 static int
381 inf_ttrace_stopped_by_watchpoint (struct target_ops *ops)
382 {
383 pid_t pid = ptid_get_pid (inferior_ptid);
384 lwpid_t lwpid = ptid_get_lwp (inferior_ptid);
385 ttstate_t tts;
386
387 if (inf_ttrace_page_dict.count > 0)
388 {
389 if (ttrace (TT_LWP_GET_STATE, pid, lwpid,
390 (uintptr_t)&tts, sizeof tts, 0) == -1)
391 perror_with_name (("ttrace"));
392
393 if (tts.tts_event == TTEVT_SIGNAL
394 && tts.tts_u.tts_signal.tts_signo == SIGBUS)
395 {
396 const int pagesize = inf_ttrace_page_dict.pagesize;
397 void *addr = tts.tts_u.tts_signal.tts_siginfo.si_addr;
398 CORE_ADDR page_addr = ((uintptr_t)addr / pagesize) * pagesize;
399
400 if (inf_ttrace_get_page (pid, page_addr))
401 return 1;
402 }
403 }
404
405 return 0;
406 }
407 \f
408
409 /* When tracking a vfork(2), we cannot detach from the parent until
410 after the child has called exec(3) or has exited. If we are still
411 attached to the parent, this variable will be set to the process ID
412 of the parent. Otherwise it will be set to -1. */
413 static pid_t inf_ttrace_vfork_ppid = -1;
414
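/* Implement the to_follow_fork target_ops method. */
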
415 static int
416 inf_ttrace_follow_fork (struct target_ops *ops, int follow_child,
417 int detach_fork)
418 {
419 pid_t pid, fpid;
420 lwpid_t lwpid, flwpid;
421 ttstate_t tts;
422 struct thread_info *tp = inferior_thread ();
423
424 gdb_assert (tp->pending_follow.kind == TARGET_WAITKIND_FORKED
425 || tp->pending_follow.kind == TARGET_WAITKIND_VFORKED);
426
427 pid = ptid_get_pid (inferior_ptid);
428 lwpid = ptid_get_lwp (inferior_ptid);
429
430 /* Get all important details that core GDB doesn't (and shouldn't)
431 know about. */
432 if (ttrace (TT_LWP_GET_STATE, pid, lwpid,
433 (uintptr_t)&tts, sizeof tts, 0) == -1)
434 perror_with_name (("ttrace"));
435
436 gdb_assert (tts.tts_event == TTEVT_FORK || tts.tts_event == TTEVT_VFORK);
437
438 if (tts.tts_u.tts_fork.tts_isparent)
439 {
440 pid = tts.tts_pid;
441 lwpid = tts.tts_lwpid;
442 fpid = tts.tts_u.tts_fork.tts_fpid;
443 flwpid = tts.tts_u.tts_fork.tts_flwpid;
444 }
445 else
446 {
447 pid = tts.tts_u.tts_fork.tts_fpid;
448 lwpid = tts.tts_u.tts_fork.tts_flwpid;
449 fpid = tts.tts_pid;
450 flwpid = tts.tts_lwpid;
451 }
452
453 if (follow_child)
454 {
455 struct inferior *inf;
456 struct inferior *parent_inf;
457
458 parent_inf = find_inferior_pid (pid);
459
460 inferior_ptid = ptid_build (fpid, flwpid, 0);
461 inf = add_inferior (fpid);
462 inf->attach_flag = parent_inf->attach_flag;
463 inf->pspace = parent_inf->pspace;
464 inf->aspace = parent_inf->aspace;
465 copy_terminal_info (inf, parent_inf);
466 detach_breakpoints (ptid_build (pid, lwpid, 0));
467
468 target_terminal_ours ();
469 fprintf_unfiltered (gdb_stdlog,
470 _("Attaching after fork to child process %ld.\n"),
471 (long)fpid);
472 }
473 else
474 {
475 inferior_ptid = ptid_build (pid, lwpid, 0);
476 /* Detach any remaining breakpoints in the child. In the case
477 of fork events, we do not need to do this, because breakpoints
478 should have already been removed earlier. */
479 if (tts.tts_event == TTEVT_VFORK)
480 detach_breakpoints (ptid_build (fpid, flwpid, 0));
481
482 target_terminal_ours ();
483 fprintf_unfiltered (gdb_stdlog,
484 _("Detaching after fork from child process %ld.\n"),
485 (long)fpid);
486 }
487
488 if (tts.tts_event == TTEVT_VFORK)
489 {
490 gdb_assert (!tts.tts_u.tts_fork.tts_isparent);
491
492 if (follow_child)
493 {
494 /* We can't detach from the parent yet. */
495 inf_ttrace_vfork_ppid = pid;
496
497 reattach_breakpoints (fpid);
498 }
499 else
500 {
501 if (ttrace (TT_PROC_DETACH, fpid, 0, 0, 0, 0) == -1)
502 perror_with_name (("ttrace"));
503
504 /* Wait till we get the TTEVT_VFORK event in the parent.
505 This indicates that the child has called exec(3) or has
506 exited and that the parent is ready to be traced again. */
507 if (ttrace_wait (pid, lwpid, TTRACE_WAITOK, &tts, sizeof tts) == -1)
508 perror_with_name (("ttrace_wait"));
509 gdb_assert (tts.tts_event == TTEVT_VFORK);
510 gdb_assert (tts.tts_u.tts_fork.tts_isparent);
511
512 reattach_breakpoints (pid);
513 }
514 }
515 else
516 {
517 gdb_assert (tts.tts_u.tts_fork.tts_isparent);
518
519 if (follow_child)
520 {
521 if (ttrace (TT_PROC_DETACH, pid, 0, 0, 0, 0) == -1)
522 perror_with_name (("ttrace"));
523 }
524 else
525 {
526 if (ttrace (TT_PROC_DETACH, fpid, 0, 0, 0, 0) == -1)
527 perror_with_name (("ttrace"));
528 }
529 }
530
531 if (follow_child)
532 {
533 struct thread_info *ti;
534
535 /* The child will start out single-threaded. */
536 inf_ttrace_num_lwps = 1;
537 inf_ttrace_num_lwps_in_syscall = 0;
538
539 /* Delete parent. */
540 delete_thread_silent (ptid_build (pid, lwpid, 0));
541 detach_inferior (pid);
542
543 /* Add child thread. inferior_ptid was already set above. */
544 ti = add_thread_silent (inferior_ptid);
545 ti->private =
546 xmalloc (sizeof (struct inf_ttrace_private_thread_info));
547 memset (ti->private, 0,
548 sizeof (struct inf_ttrace_private_thread_info));
549 }
550
551 return 0;
552 }
553 \f
554
555 /* File descriptors for pipes used as semaphores during initial
556 startup of an inferior. */
557 static int inf_ttrace_pfd1[2];
558 static int inf_ttrace_pfd2[2];
559
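/* Cleanup that closes both startup pipes and undoes their
mark_fd_no_cloexec registration. */
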
560 static void
561 do_cleanup_pfds (void *dummy)
562 {
563 close (inf_ttrace_pfd1[0]);
564 close (inf_ttrace_pfd1[1]);
565 close (inf_ttrace_pfd2[0]);
566 close (inf_ttrace_pfd2[1]);
567
568 unmark_fd_no_cloexec (inf_ttrace_pfd1[0]);
569 unmark_fd_no_cloexec (inf_ttrace_pfd1[1]);
570 unmark_fd_no_cloexec (inf_ttrace_pfd2[0]);
571 unmark_fd_no_cloexec (inf_ttrace_pfd2[1]);
572 }
573
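/* Create the pipes used as semaphores during initial startup of the
inferior. */
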
574 static void
575 inf_ttrace_prepare (void)
576 {
577 if (pipe (inf_ttrace_pfd1) == -1)
578 perror_with_name (("pipe"));
579
580 if (pipe (inf_ttrace_pfd2) == -1)
581 {
582 close (inf_ttrace_pfd1[0]);
583 close (inf_ttrace_pfd1[1]);
584 perror_with_name (("pipe"));
585 }
586
587 mark_fd_no_cloexec (inf_ttrace_pfd1[0]);
588 mark_fd_no_cloexec (inf_ttrace_pfd1[1]);
589 mark_fd_no_cloexec (inf_ttrace_pfd2[0]);
590 mark_fd_no_cloexec (inf_ttrace_pfd2[1]);
591 }
592
593 /* Prepare to be traced. */
594
595 static void
596 inf_ttrace_me (void)
597 {
598 struct cleanup *old_chain = make_cleanup (do_cleanup_pfds, 0);
599 char c;
600
601 /* "Trace me, Dr. Memory!" */
602 if (ttrace (TT_PROC_SETTRC, 0, 0, 0, TT_VERSION, 0) == -1)
603 perror_with_name (("ttrace"));
604
605 /* Tell our parent that we are ready to be traced. */
606 if (write (inf_ttrace_pfd1[1], &c, sizeof c) != sizeof c)
607 perror_with_name (("write"));
608
609 /* Wait until our parent has set the initial event mask. */
610 if (read (inf_ttrace_pfd2[0], &c, sizeof c) != sizeof c)
611 perror_with_name (("read"));
612
613 do_cleanups (old_chain);
614 }
615
616 /* Start tracing PID. */
617
618 static void
619 inf_ttrace_him (struct target_ops *ops, int pid)
620 {
621 struct cleanup *old_chain = make_cleanup (do_cleanup_pfds, 0);
622 ttevent_t tte;
623 char c;
624
625 /* Wait until our child is ready to be traced. */
626 if (read (inf_ttrace_pfd1[0], &c, sizeof c) != sizeof c)
627 perror_with_name (("read"));
628
629 /* Set the initial event mask. */
630 memset (&tte, 0, sizeof (tte));
631 tte.tte_events |= TTEVT_EXEC | TTEVT_EXIT | TTEVT_FORK | TTEVT_VFORK;
632 tte.tte_events |= TTEVT_LWP_CREATE | TTEVT_LWP_EXIT | TTEVT_LWP_TERMINATE;
633 #ifdef TTEVT_BPT_SSTEP
634 tte.tte_events |= TTEVT_BPT_SSTEP;
635 #endif
636 tte.tte_opts |= TTEO_PROC_INHERIT;
637 if (ttrace (TT_PROC_SET_EVENT_MASK, pid, 0,
638 (uintptr_t)&tte, sizeof tte, 0) == -1)
639 perror_with_name (("ttrace"));
640
641 /* Tell our child that we have set the initial event mask. */
642 if (write (inf_ttrace_pfd2[1], &c, sizeof c) != sizeof c)
643 perror_with_name (("write"));
644
645 do_cleanups (old_chain);
646
647 push_target (ops);
648
649 startup_inferior (START_INFERIOR_TRAPS_EXPECTED);
650
651 /* On some targets, there must be some explicit actions taken after
652 the inferior has been started up. */
653 target_post_startup_inferior (pid_to_ptid (pid));
654 }
655
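/* Implement the to_create_inferior target_ops method. */
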
656 static void
657 inf_ttrace_create_inferior (struct target_ops *ops, char *exec_file,
658 char *allargs, char **env, int from_tty)
659 {
660 int pid;
661
662 gdb_assert (inf_ttrace_num_lwps == 0);
663 gdb_assert (inf_ttrace_num_lwps_in_syscall == 0);
664 gdb_assert (inf_ttrace_page_dict.count == 0);
665 gdb_assert (inf_ttrace_reenable_page_protections == 0);
666 gdb_assert (inf_ttrace_vfork_ppid == -1);
667
668 pid = fork_inferior (exec_file, allargs, env, inf_ttrace_me, NULL,
669 inf_ttrace_prepare, NULL, NULL);
670
671 inf_ttrace_him (ops, pid);
672 }
673
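/* Implement the to_mourn_inferior target_ops method. */
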
674 static void
675 inf_ttrace_mourn_inferior (struct target_ops *ops)
676 {
677 const int num_buckets = ARRAY_SIZE (inf_ttrace_page_dict.buckets);
678 int bucket;
679
680 inf_ttrace_num_lwps = 0;
681 inf_ttrace_num_lwps_in_syscall = 0;
682
683 for (bucket = 0; bucket < num_buckets; bucket++)
684 {
685 struct inf_ttrace_page *page;
686 struct inf_ttrace_page *next;
687
688 page = inf_ttrace_page_dict.buckets[bucket].next;
689 while (page)
690 {
691 next = page->next;
692 xfree (page);
693 page = next;
694 }
695 }
696 inf_ttrace_page_dict.count = 0;
697
698 unpush_target (ops);
699 generic_mourn_inferior ();
700 }
701
702 /* Assuming we just attached the debugger to a new inferior, create
703 a new thread_info structure for each thread, and add it to our
704 list of threads. */
705
706 static void
707 inf_ttrace_create_threads_after_attach (int pid)
708 {
709 int status;
710 ptid_t ptid;
711 ttstate_t tts;
712 struct thread_info *ti;
713
714 status = ttrace (TT_PROC_GET_FIRST_LWP_STATE, pid, 0,
715 (uintptr_t) &tts, sizeof (ttstate_t), 0);
716 if (status < 0)
717 perror_with_name (_("TT_PROC_GET_FIRST_LWP_STATE ttrace call failed"));
718 gdb_assert (tts.tts_pid == pid);
719
720 /* Add the stopped thread. */
721 ptid = ptid_build (pid, tts.tts_lwpid, 0);
722 ti = add_thread (ptid);
723 ti->private = xzalloc (sizeof (struct inf_ttrace_private_thread_info));
724 inf_ttrace_num_lwps++;
725
726 /* We use the "first stopped thread" as the currently active thread. */
727 inferior_ptid = ptid;
728
729 /* Iterate over all the remaining threads. */
730
731 for (;;)
732 {
733 ptid_t ptid;
734
735 status = ttrace (TT_PROC_GET_NEXT_LWP_STATE, pid, 0,
736 (uintptr_t) &tts, sizeof (ttstate_t), 0);
737 if (status < 0)
738 perror_with_name (_("TT_PROC_GET_NEXT_LWP_STATE ttrace call failed"));
739 if (status == 0)
740 break; /* End of list. */
741
742 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
743 ti = add_thread (ptid);
744 ti->private = xzalloc (sizeof (struct inf_ttrace_private_thread_info));
745 inf_ttrace_num_lwps++;
746 }
747 }
748
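/* Implement the to_attach target_ops method. */
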
749 static void
750 inf_ttrace_attach (struct target_ops *ops, char *args, int from_tty)
751 {
752 char *exec_file;
753 pid_t pid;
754 ttevent_t tte;
755 struct inferior *inf;
756
757 pid = parse_pid_to_attach (args);
758
759 if (pid == getpid ()) /* Refuse to debug ourselves. */
760 error (_("I refuse to debug myself!"));
761
762 if (from_tty)
763 {
764 exec_file = get_exec_file (0);
765
766 if (exec_file)
767 printf_unfiltered (_("Attaching to program: %s, %s\n"), exec_file,
768 target_pid_to_str (pid_to_ptid (pid)));
769 else
770 printf_unfiltered (_("Attaching to %s\n"),
771 target_pid_to_str (pid_to_ptid (pid)));
772
773 gdb_flush (gdb_stdout);
774 }
775
776 gdb_assert (inf_ttrace_num_lwps == 0);
777 gdb_assert (inf_ttrace_num_lwps_in_syscall == 0);
778 gdb_assert (inf_ttrace_vfork_ppid == -1);
779
780 if (ttrace (TT_PROC_ATTACH, pid, 0, TT_KILL_ON_EXIT, TT_VERSION, 0) == -1)
781 perror_with_name (("ttrace"));
782
783 inf = current_inferior ();
784 inferior_appeared (inf, pid);
785 inf->attach_flag = 1;
786
787 /* Set the initial event mask. */
788 memset (&tte, 0, sizeof (tte));
789 tte.tte_events |= TTEVT_EXEC | TTEVT_EXIT | TTEVT_FORK | TTEVT_VFORK;
790 tte.tte_events |= TTEVT_LWP_CREATE | TTEVT_LWP_EXIT | TTEVT_LWP_TERMINATE;
791 #ifdef TTEVT_BPT_SSTEP
792 tte.tte_events |= TTEVT_BPT_SSTEP;
793 #endif
794 tte.tte_opts |= TTEO_PROC_INHERIT;
795 if (ttrace (TT_PROC_SET_EVENT_MASK, pid, 0,
796 (uintptr_t)&tte, sizeof tte, 0) == -1)
797 perror_with_name (("ttrace"));
798
799 push_target (ops);
800
801 inf_ttrace_create_threads_after_attach (pid);
802 }
803
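/* Implement the to_detach target_ops method. */
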
804 static void
805 inf_ttrace_detach (struct target_ops *ops, const char *args, int from_tty)
806 {
807 pid_t pid = ptid_get_pid (inferior_ptid);
808 int sig = 0;
809
810 if (from_tty)
811 {
812 char *exec_file = get_exec_file (0);
813 if (exec_file == 0)
814 exec_file = "";
815 printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file,
816 target_pid_to_str (pid_to_ptid (pid)));
817 gdb_flush (gdb_stdout);
818 }
819 if (args)
820 sig = atoi (args);
821
822 /* ??? The HP-UX 11.0 ttrace(2) manual page doesn't mention that we
823 can pass a signal number here. Does this really work? */
824 if (ttrace (TT_PROC_DETACH, pid, 0, 0, sig, 0) == -1)
825 perror_with_name (("ttrace"));
826
827 if (inf_ttrace_vfork_ppid != -1)
828 {
829 if (ttrace (TT_PROC_DETACH, inf_ttrace_vfork_ppid, 0, 0, 0, 0) == -1)
830 perror_with_name (("ttrace"));
831 inf_ttrace_vfork_ppid = -1;
832 }
833
834 inf_ttrace_num_lwps = 0;
835 inf_ttrace_num_lwps_in_syscall = 0;
836
837 inferior_ptid = null_ptid;
838 detach_inferior (pid);
839
840 unpush_target (ops);
841 }
842
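/* Implement the to_kill target_ops method. */
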
843 static void
844 inf_ttrace_kill (struct target_ops *ops)
845 {
846 pid_t pid = ptid_get_pid (inferior_ptid);
847
848 if (pid == 0)
849 return;
850
851 if (ttrace (TT_PROC_EXIT, pid, 0, 0, 0, 0) == -1)
852 perror_with_name (("ttrace"));
853 /* ??? Is it necessary to call ttrace_wait() here? */
854
855 if (inf_ttrace_vfork_ppid != -1)
856 {
857 if (ttrace (TT_PROC_DETACH, inf_ttrace_vfork_ppid, 0, 0, 0, 0) == -1)
858 perror_with_name (("ttrace"));
859 inf_ttrace_vfork_ppid = -1;
860 }
861
862 target_mourn_inferior ();
863 }
864
865 /* Check if a dying thread is dead by now, and delete it from GDB's
866 thread list if so. */
867 static int
868 inf_ttrace_delete_dead_threads_callback (struct thread_info *info, void *arg)
869 {
870 lwpid_t lwpid;
871 struct inf_ttrace_private_thread_info *p;
872
873 if (is_exited (info->ptid))
874 return 0;
875
876 lwpid = ptid_get_lwp (info->ptid);
877 p = (struct inf_ttrace_private_thread_info *) info->private;
878
879 /* Check if an lwp that was dying is still there or not. */
880 if (p->dying && (kill (lwpid, 0) == -1))
881 /* It's gone now. */
882 delete_thread (info->ptid);
883
884 return 0;
885 }
886
887 /* Resume the lwp pointed to by INFO, with REQUEST, and pass it signal
888 SIG. */
889
890 static void
891 inf_ttrace_resume_lwp (struct thread_info *info, ttreq_t request, int sig)
892 {
893 pid_t pid = ptid_get_pid (info->ptid);
894 lwpid_t lwpid = ptid_get_lwp (info->ptid);
895
896 if (ttrace (request, pid, lwpid, TT_NOPC, sig, 0) == -1)
897 {
898 struct inf_ttrace_private_thread_info *p
899 = (struct inf_ttrace_private_thread_info *) info->private;
900 if (p->dying && errno == EPROTO)
901 /* This is expected, it means the dying lwp is really gone
902 by now. If ttrace had an event to inform the debugger
903 the lwp is really gone, this wouldn't be needed. */
904 delete_thread (info->ptid);
905 else
906 /* This was really unexpected. */
907 perror_with_name (("ttrace"));
908 }
909 }
910
911 /* Callback for iterate_over_threads. */
912
913 static int
914 inf_ttrace_resume_callback (struct thread_info *info, void *arg)
915 {
916 if (!ptid_equal (info->ptid, inferior_ptid) && !is_exited (info->ptid))
917 inf_ttrace_resume_lwp (info, TT_LWP_CONTINUE, 0);
918
919 return 0;
920 }
921
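/* Implement the to_resume target_ops method. */
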
922 static void
923 inf_ttrace_resume (struct target_ops *ops,
924 ptid_t ptid, int step, enum gdb_signal signal)
925 {
926 int resume_all;
927 ttreq_t request = step ? TT_LWP_SINGLE : TT_LWP_CONTINUE;
928 int sig = gdb_signal_to_host (signal);
929 struct thread_info *info;
930
931 /* A specific PTID means `resume only this lwp'. */
932 resume_all = (ptid_equal (ptid, minus_one_ptid));
933
934 /* If resuming all threads, it's the current thread that should be
935 handled specially. */
936 if (resume_all)
937 ptid = inferior_ptid;
938
939 info = find_thread_ptid (ptid);
940 inf_ttrace_resume_lwp (info, request, sig);
941
942 if (resume_all)
943 /* Let all the other threads run too. */
944 iterate_over_threads (inf_ttrace_resume_callback, NULL);
945 }
946
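/* Implement the to_wait target_ops method. */
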
947 static ptid_t
948 inf_ttrace_wait (struct target_ops *ops,
949 ptid_t ptid, struct target_waitstatus *ourstatus, int options)
950 {
951 pid_t pid = ptid_get_pid (ptid);
952 lwpid_t lwpid = ptid_get_lwp (ptid);
953 ttstate_t tts;
954 struct thread_info *ti;
955 ptid_t related_ptid;
956
957 /* Until proven otherwise. */
958 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
959
960 if (pid == -1)
961 pid = lwpid = 0;
962
963 gdb_assert (pid != 0 || lwpid == 0);
964
965 do
966 {
967 set_sigint_trap ();
968
969 if (ttrace_wait (pid, lwpid, TTRACE_WAITOK, &tts, sizeof tts) == -1)
970 perror_with_name (("ttrace_wait"));
971
972 if (tts.tts_event == TTEVT_VFORK && tts.tts_u.tts_fork.tts_isparent)
973 {
974 if (inf_ttrace_vfork_ppid != -1)
975 {
976 gdb_assert (inf_ttrace_vfork_ppid == tts.tts_pid);
977
978 if (ttrace (TT_PROC_DETACH, tts.tts_pid, 0, 0, 0, 0) == -1)
979 perror_with_name (("ttrace"));
980 inf_ttrace_vfork_ppid = -1;
981 }
982
983 tts.tts_event = TTEVT_NONE;
984 }
985
986 clear_sigint_trap ();
987 }
988 while (tts.tts_event == TTEVT_NONE);
989
990 /* Now that we've waited, we can re-enable the page protections. */
991 if (inf_ttrace_reenable_page_protections)
992 {
993 gdb_assert (inf_ttrace_num_lwps_in_syscall == 0);
994 inf_ttrace_enable_page_protections (tts.tts_pid);
995 inf_ttrace_reenable_page_protections = 0;
996 }
997
998 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
999
1000 if (inf_ttrace_num_lwps == 0)
1001 {
1002 struct thread_info *ti;
1003
1004 inf_ttrace_num_lwps = 1;
1005
1006 /* This is the earliest we hear about the lwp member of
1007 INFERIOR_PTID, after an attach or fork_inferior. */
1008 gdb_assert (ptid_get_lwp (inferior_ptid) == 0);
1009
1010 /* We haven't set the private member on the main thread yet. Do
1011 it now. */
1012 ti = find_thread_ptid (inferior_ptid);
1013 gdb_assert (ti != NULL && ti->private == NULL);
1014 ti->private =
1015 xmalloc (sizeof (struct inf_ttrace_private_thread_info));
1016 memset (ti->private, 0,
1017 sizeof (struct inf_ttrace_private_thread_info));
1018
1019 /* Notify the core that this ptid changed. This changes
1020 inferior_ptid as well. */
1021 thread_change_ptid (inferior_ptid, ptid);
1022 }
1023
1024 switch (tts.tts_event)
1025 {
1026 #ifdef TTEVT_BPT_SSTEP
1027 case TTEVT_BPT_SSTEP:
1028 /* Make it look like a breakpoint. */
1029 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1030 ourstatus->value.sig = GDB_SIGNAL_TRAP;
1031 break;
1032 #endif
1033
1034 case TTEVT_EXEC:
1035 ourstatus->kind = TARGET_WAITKIND_EXECD;
1036 ourstatus->value.execd_pathname =
1037 xmalloc (tts.tts_u.tts_exec.tts_pathlen + 1);
1038 if (ttrace (TT_PROC_GET_PATHNAME, tts.tts_pid, 0,
1039 (uintptr_t)ourstatus->value.execd_pathname,
1040 tts.tts_u.tts_exec.tts_pathlen, 0) == -1)
1041 perror_with_name (("ttrace"));
1042 ourstatus->value.execd_pathname[tts.tts_u.tts_exec.tts_pathlen] = 0;
1043
1044 /* At this point, all inserted breakpoints are gone. Doing this
1045 as soon as we detect an exec prevents breakpoint deletion from
1046 writing the saved "shadow contents" back to memory to lift the
1047 bp. That shadow is NOT valid after an exec. */
1048 mark_breakpoints_out ();
1049 break;
1050
1051 case TTEVT_EXIT:
1052 store_waitstatus (ourstatus, tts.tts_u.tts_exit.tts_exitcode);
1053 inf_ttrace_num_lwps = 0;
1054 break;
1055
1056 case TTEVT_FORK:
1057 related_ptid = ptid_build (tts.tts_u.tts_fork.tts_fpid,
1058 tts.tts_u.tts_fork.tts_flwpid, 0);
1059
1060 ourstatus->kind = TARGET_WAITKIND_FORKED;
1061 ourstatus->value.related_pid = related_ptid;
1062
1063 /* Make sure the other end of the fork is stopped too. */
1064 if (ttrace_wait (tts.tts_u.tts_fork.tts_fpid,
1065 tts.tts_u.tts_fork.tts_flwpid,
1066 TTRACE_WAITOK, &tts, sizeof tts) == -1)
1067 perror_with_name (("ttrace_wait"));
1068
1069 gdb_assert (tts.tts_event == TTEVT_FORK);
1070 if (tts.tts_u.tts_fork.tts_isparent)
1071 {
1072 related_ptid = ptid_build (tts.tts_u.tts_fork.tts_fpid,
1073 tts.tts_u.tts_fork.tts_flwpid, 0);
1074 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
1075 ourstatus->value.related_pid = related_ptid;
1076 }
1077 break;
1078
1079 case TTEVT_VFORK:
1080 gdb_assert (!tts.tts_u.tts_fork.tts_isparent);
1081
1082 related_ptid = ptid_build (tts.tts_u.tts_fork.tts_fpid,
1083 tts.tts_u.tts_fork.tts_flwpid, 0);
1084
1085 ourstatus->kind = TARGET_WAITKIND_VFORKED;
1086 ourstatus->value.related_pid = related_ptid;
1087
1088 /* HACK: To avoid touching the parent during the vfork, switch
1089 away from it. */
1090 inferior_ptid = ptid;
1091 break;
1092
1093 case TTEVT_LWP_CREATE:
1094 lwpid = tts.tts_u.tts_thread.tts_target_lwpid;
1095 ptid = ptid_build (tts.tts_pid, lwpid, 0);
1096 ti = add_thread (ptid);
1097 ti->private =
1098 xmalloc (sizeof (struct inf_ttrace_private_thread_info));
1099 memset (ti->private, 0,
1100 sizeof (struct inf_ttrace_private_thread_info));
1101 inf_ttrace_num_lwps++;
1102 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
1103 /* Let the lwp_create-caller thread continue. */
1104 ttrace (TT_LWP_CONTINUE, ptid_get_pid (ptid),
1105 ptid_get_lwp (ptid), TT_NOPC, 0, 0);
1106 /* Return without stopping the whole process. */
1107 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1108 return ptid;
1109
1110 case TTEVT_LWP_EXIT:
1111 if (print_thread_events)
1112 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (ptid));
1113 ti = find_thread_ptid (ptid);
1114 gdb_assert (ti != NULL);
1115 ((struct inf_ttrace_private_thread_info *)ti->private)->dying = 1;
1116 inf_ttrace_num_lwps--;
1117 /* Let the thread really exit. */
1118 ttrace (TT_LWP_CONTINUE, ptid_get_pid (ptid),
1119 ptid_get_lwp (ptid), TT_NOPC, 0, 0);
1120 /* Return without stopping the whole process. */
1121 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1122 return ptid;
1123
1124 case TTEVT_LWP_TERMINATE:
1125 lwpid = tts.tts_u.tts_thread.tts_target_lwpid;
1126 ptid = ptid_build (tts.tts_pid, lwpid, 0);
1127 if (print_thread_events)
1128 printf_unfiltered (_("[%s has been terminated]\n"),
1129 target_pid_to_str (ptid));
1130 ti = find_thread_ptid (ptid);
1131 gdb_assert (ti != NULL);
1132 ((struct inf_ttrace_private_thread_info *)ti->private)->dying = 1;
1133 inf_ttrace_num_lwps--;
1134
1135 /* Resume the lwp_terminate-caller thread. */
1136 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
1137 ttrace (TT_LWP_CONTINUE, ptid_get_pid (ptid),
1138 ptid_get_lwp (ptid), TT_NOPC, 0, 0);
1139 /* Return without stopping the whole process. */
1140 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1141 return ptid;
1142
1143 case TTEVT_SIGNAL:
1144 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1145 ourstatus->value.sig =
1146 gdb_signal_from_host (tts.tts_u.tts_signal.tts_signo);
1147 break;
1148
1149 case TTEVT_SYSCALL_ENTRY:
1150 gdb_assert (inf_ttrace_reenable_page_protections == 0);
1151 inf_ttrace_num_lwps_in_syscall++;
1152 if (inf_ttrace_num_lwps_in_syscall == 1)
1153 {
1154 /* A thread has just entered a system call. Disable any
1155 page protections as the kernel can't deal with them. */
1156 inf_ttrace_disable_page_protections (tts.tts_pid);
1157 }
1158 ourstatus->kind = TARGET_WAITKIND_SYSCALL_ENTRY;
1159 ourstatus->value.syscall_number = tts.tts_scno;
1160 break;
1161
1162 case TTEVT_SYSCALL_RETURN:
1163 if (inf_ttrace_num_lwps_in_syscall > 0)
1164 {
1165 /* If the last thread has just left the system call, this
1166 would be a logical place to re-enable the page
1167 protections, but that doesn't work. We can't re-enable
1168 them until we've done another wait. */
1169 inf_ttrace_reenable_page_protections =
1170 (inf_ttrace_num_lwps_in_syscall == 1);
1171 inf_ttrace_num_lwps_in_syscall--;
1172 }
1173 ourstatus->kind = TARGET_WAITKIND_SYSCALL_RETURN;
1174 ourstatus->value.syscall_number = tts.tts_scno;
1175 break;
1176
1177 default:
1178 gdb_assert (!"Unexpected ttrace event");
1179 break;
1180 }
1181
1182 /* Make sure all threads within the process are stopped. */
1183 if (ttrace (TT_PROC_STOP, tts.tts_pid, 0, 0, 0, 0) == -1)
1184 perror_with_name (("ttrace"));
1185
1186 /* Now that the whole process is stopped, check if any dying thread
1187 is really dead by now. If a dying thread is still alive, it will
1188 be stopped too, and will still show up in `info threads', tagged
1189 with "(Exiting)". We could make `info threads' prune dead
1190 threads instead via inf_ttrace_thread_alive, but doing this here
1191 has the advantage that a frontend is notified sooner of thread
1192 exits. Note that a dying lwp is still alive, it still has to be
1193 resumed, like any other lwp. */
1194 iterate_over_threads (inf_ttrace_delete_dead_threads_callback, NULL);
1195
1196 return ptid;
1197 }
1198
1199 /* Transfer LEN bytes from ADDR in the inferior's memory into READBUF,
1200 and transfer LEN bytes from WRITEBUF into the inferior's memory at
1201 ADDR. Either READBUF or WRITEBUF may be null, in which case the
1202 corresponding transfer doesn't happen. Return the number of bytes
1203 actually transferred (which may be zero if an error occurs). */
1204
1205 static LONGEST
1206 inf_ttrace_xfer_memory (CORE_ADDR addr, ULONGEST len,
1207 void *readbuf, const void *writebuf)
1208 {
1209 pid_t pid = ptid_get_pid (inferior_ptid);
1210
1211 /* HP-UX treats text space and data space differently. GDB, however,
1212 doesn't really know the difference. Therefore we try both. Try
1213 text space before data space though because when we're writing
1214 into text space the instruction cache might need to be flushed. */
1215
1216 if (readbuf
1217 && ttrace (TT_PROC_RDTEXT, pid, 0, addr, len, (uintptr_t)readbuf) == -1
1218 && ttrace (TT_PROC_RDDATA, pid, 0, addr, len, (uintptr_t)readbuf) == -1)
1219 return 0;
1220
1221 if (writebuf
1222 && ttrace (TT_PROC_WRTEXT, pid, 0, addr, len, (uintptr_t)writebuf) == -1
1223 && ttrace (TT_PROC_WRDATA, pid, 0, addr, len, (uintptr_t)writebuf) == -1)
1224 return 0;
1225
1226 return len;
1227 }
1228
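/* Implement the to_xfer_partial target_ops method. */
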
1229 static enum target_xfer_status
1230 inf_ttrace_xfer_partial (struct target_ops *ops, enum target_object object,
1231 const char *annex, gdb_byte *readbuf,
1232 const gdb_byte *writebuf,
1233 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
1234 {
1235 switch (object)
1236 {
1237 case TARGET_OBJECT_MEMORY:
1238 {
1239 LONGEST val = inf_ttrace_xfer_memory (offset, len, readbuf, writebuf);
1240
1241 if (val == 0)
1242 return TARGET_XFER_EOF;
1243 else
1244 {
1245 *xfered_len = (ULONGEST) val;
1246 return TARGET_XFER_OK;
1247 }
1248 }
1249
1250 case TARGET_OBJECT_UNWIND_TABLE:
1251 return TARGET_XFER_E_IO;
1252
1253 case TARGET_OBJECT_AUXV:
1254 return TARGET_XFER_E_IO;
1255
1256 case TARGET_OBJECT_WCOOKIE:
1257 return TARGET_XFER_E_IO;
1258
1259 default:
1260 return TARGET_XFER_E_IO;
1261 }
1262 }
1263
1264 /* Print status information about what we're accessing. */
1265
1266 static void
1267 inf_ttrace_files_info (struct target_ops *ignore)
1268 {
1269 struct inferior *inf = current_inferior ();
1270 printf_filtered (_("\tUsing the running image of %s %s.\n"),
1271 inf->attach_flag ? "attached" : "child",
1272 target_pid_to_str (inferior_ptid));
1273 }
1274
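/* Implement the to_thread_alive target_ops method. */
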
1275 static int
1276 inf_ttrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1277 {
1278 return 1;
1279 }
1280
1281 /* Return a string describing the state of the thread specified by
1282 INFO. */
1283
1284 static char *
1285 inf_ttrace_extra_thread_info (struct thread_info *info)
1286 {
1287 struct inf_ttrace_private_thread_info *private =
1288 (struct inf_ttrace_private_thread_info *) info->private;
1289
1290 if (private != NULL && private->dying)
1291 return "Exiting";
1292
1293 return NULL;
1294 }
1295
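/* Implement the to_pid_to_str target_ops method. */
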
1296 static char *
1297 inf_ttrace_pid_to_str (struct target_ops *ops, ptid_t ptid)
1298 {
1299 pid_t pid = ptid_get_pid (ptid);
1300 lwpid_t lwpid = ptid_get_lwp (ptid);
1301 static char buf[128];
1302
1303 if (lwpid == 0)
1304 xsnprintf (buf, sizeof buf, "process %ld",
1305 (long) pid);
1306 else
1307 xsnprintf (buf, sizeof buf, "process %ld, lwp %ld",
1308 (long) pid, (long) lwpid);
1309 return buf;
1310 }
1311 \f
1312
1313 /* Implement the get_ada_task_ptid target_ops method. */
1314
1315 static ptid_t
1316 inf_ttrace_get_ada_task_ptid (long lwp, long thread)
1317 {
1318 return ptid_build (ptid_get_pid (inferior_ptid), lwp, 0);
1319 }
1320
1321 \f
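/* Create a ttrace target_ops vector, built on top of the inf-child
target. */
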
1322 struct target_ops *
1323 inf_ttrace_target (void)
1324 {
1325 struct target_ops *t = inf_child_target ();
1326
1327 t->to_attach = inf_ttrace_attach;
1328 t->to_detach = inf_ttrace_detach;
1329 t->to_resume = inf_ttrace_resume;
1330 t->to_wait = inf_ttrace_wait;
1331 t->to_files_info = inf_ttrace_files_info;
1332 t->to_can_use_hw_breakpoint = inf_ttrace_can_use_hw_breakpoint;
1333 t->to_insert_watchpoint = inf_ttrace_insert_watchpoint;
1334 t->to_remove_watchpoint = inf_ttrace_remove_watchpoint;
1335 t->to_stopped_by_watchpoint = inf_ttrace_stopped_by_watchpoint;
1336 t->to_region_ok_for_hw_watchpoint =
1337 inf_ttrace_region_ok_for_hw_watchpoint;
1338 t->to_kill = inf_ttrace_kill;
1339 t->to_create_inferior = inf_ttrace_create_inferior;
1340 t->to_follow_fork = inf_ttrace_follow_fork;
1341 t->to_mourn_inferior = inf_ttrace_mourn_inferior;
1342 t->to_thread_alive = inf_ttrace_thread_alive;
1343 t->to_extra_thread_info = inf_ttrace_extra_thread_info;
1344 t->to_pid_to_str = inf_ttrace_pid_to_str;
1345 t->to_xfer_partial = inf_ttrace_xfer_partial;
1346 t->to_get_ada_task_ptid = inf_ttrace_get_ada_task_ptid;
1347
1348 return t;
1349 }
1350 #endif
1351 \f
1352
1353 /* Prevent warning from -Wmissing-prototypes. */
1354 void _initialize_inf_ttrace (void);
1355
1356 void
1357 _initialize_inf_ttrace (void)
1358 {
1359 #ifdef HAVE_TTRACE
1360 inf_ttrace_page_dict.pagesize = getpagesize ();
1361 #endif
1362 }