[deliverable/binutils-gdb.git] / gdb / inf-ttrace.c
1 /* Low-level child interface to ttrace.
2
3 Copyright (C) 2004-2014 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 /* The ttrace(2) system call didn't exist before HP-UX 10.30. Don't
23 try to compile this code unless we have it. */
24 #ifdef HAVE_TTRACE
25
26 #include "command.h"
27 #include "gdbcore.h"
28 #include "gdbthread.h"
29 #include "inferior.h"
30 #include "terminal.h"
31 #include "target.h"
32 #include <sys/mman.h>
33 #include <sys/ttrace.h>
34 #include <signal.h>
35
36 #include "inf-child.h"
37 #include "inf-ttrace.h"
38 #include "common/filestuff.h"
39
40 \f
41
42 /* HP-UX uses a threading model where each user-space thread
43 corresponds to a kernel thread. These kernel threads are called
44 lwps. The ttrace(2) interface gives us almost full control over
45 the threads, which makes it very easy to support them in GDB. We
46 identify the threads by process ID and lwp ID. ttrace(2) also
47 provides us with a thread's user ID (in the `tts_user_tid' member
48 of `ttstate_t') but we don't use that (yet) as it isn't necessary
49 to uniquely label the thread. */
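/* A sketch of the identification scheme (illustrative, mirroring the
   calls used throughout this file): a thread is referred to by a ptid
   built from its process ID and lwp ID,

       ptid_t ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
       pid_t pid = ptid_get_pid (ptid);
       lwpid_t lwpid = ptid_get_lwp (ptid);

   with the third (tid) component left at zero.  */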
50
51 /* Number of active lwps. */
52 static int inf_ttrace_num_lwps;
53 \f
54
55 /* On HP-UX versions that have the ttrace(2) system call, we can
56 implement "hardware" watchpoints by fiddling with the protection of
57 pages in the address space that contain the variable being watched.
58 In order to implement this, we keep a dictionary of pages for which
59 we have changed the protection. */
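/* A sketch of the mechanism implemented by the functions below: every
   page overlapping a watched region is write-protected via
   TT_PROC_SET_MPROTECT.  A write to such a page raises SIGBUS in the
   inferior, and inf_ttrace_stopped_by_watchpoint decides whether the
   stop was "ours" by checking the faulting address against the
   dictionary, roughly

       page_addr = ((uintptr_t) si_addr / pagesize) * pagesize;
       if (inf_ttrace_get_page (pid, page_addr))
         -> report a watchpoint hit;

   Pages are reference-counted so that overlapping watchpoints can
   share dictionary entries.  */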
60
61 struct inf_ttrace_page
62 {
63 CORE_ADDR addr; /* Page address. */
64 int prot; /* Protection. */
65 int refcount; /* Reference count. */
66 struct inf_ttrace_page *next;
67 struct inf_ttrace_page *prev;
68 };
69
70 struct inf_ttrace_page_dict
71 {
72 struct inf_ttrace_page buckets[128];
73 int pagesize; /* Page size. */
74 int count; /* Number of pages in this dictionary. */
75 } inf_ttrace_page_dict;
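/* Lookup sketch for the dictionary above: a page address hashes to a
   bucket with

       bucket = (addr / pagesize) % ARRAY_SIZE (inf_ttrace_page_dict.buckets);

   e.g. assuming a 4096-byte page size, the page at 0x7fffe000 lands in
   bucket (0x7fffe000 / 4096) % 128 == 126.  Each bucket element serves
   as the head of a doubly-linked list of inf_ttrace_page entries.  */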
76
77 struct inf_ttrace_private_thread_info
78 {
79 int dying;
80 };
81
82 /* Number of lwps that are currently in a system call. */
83 static int inf_ttrace_num_lwps_in_syscall;
84
85 /* Flag to indicate whether we should re-enable page protections after
86 the next wait. */
87 static int inf_ttrace_reenable_page_protections;
88
89 /* Enable system call events for process PID. */
90
91 static void
92 inf_ttrace_enable_syscall_events (pid_t pid)
93 {
94 ttevent_t tte;
95 ttstate_t tts;
96
97 gdb_assert (inf_ttrace_num_lwps_in_syscall == 0);
98
99 if (ttrace (TT_PROC_GET_EVENT_MASK, pid, 0,
100 (uintptr_t)&tte, sizeof tte, 0) == -1)
101 perror_with_name (("ttrace"));
102
103 tte.tte_events |= (TTEVT_SYSCALL_ENTRY | TTEVT_SYSCALL_RETURN);
104
105 if (ttrace (TT_PROC_SET_EVENT_MASK, pid, 0,
106 (uintptr_t)&tte, sizeof tte, 0) == -1)
107 perror_with_name (("ttrace"));
108
109 if (ttrace (TT_PROC_GET_FIRST_LWP_STATE, pid, 0,
110 (uintptr_t)&tts, sizeof tts, 0) == -1)
111 perror_with_name (("ttrace"));
112
113 if (tts.tts_flags & TTS_INSYSCALL)
114 inf_ttrace_num_lwps_in_syscall++;
115
116 /* FIXME: Handle multiple threads. */
117 }
118
119 /* Disable system call events for process PID. */
120
121 static void
122 inf_ttrace_disable_syscall_events (pid_t pid)
123 {
124 ttevent_t tte;
125
126 gdb_assert (inf_ttrace_page_dict.count == 0);
127
128 if (ttrace (TT_PROC_GET_EVENT_MASK, pid, 0,
129 (uintptr_t)&tte, sizeof tte, 0) == -1)
130 perror_with_name (("ttrace"));
131
132 tte.tte_events &= ~(TTEVT_SYSCALL_ENTRY | TTEVT_SYSCALL_RETURN);
133
134 if (ttrace (TT_PROC_SET_EVENT_MASK, pid, 0,
135 (uintptr_t)&tte, sizeof tte, 0) == -1)
136 perror_with_name (("ttrace"));
137
138 inf_ttrace_num_lwps_in_syscall = 0;
139 }
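/* Why system call events matter here (a summary of the handling in
   inf_ttrace_wait below): the kernel cannot cope with our page
   protections while an lwp is inside a system call, so the first
   TTEVT_SYSCALL_ENTRY drops them via
   inf_ttrace_disable_page_protections, and when the last lwp reports
   TTEVT_SYSCALL_RETURN the inf_ttrace_reenable_page_protections flag
   arranges for them to be restored after the next wait.  */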
140
141 /* Get information about the page at address ADDR for process PID from
142 the dictionary. */
143
144 static struct inf_ttrace_page *
145 inf_ttrace_get_page (pid_t pid, CORE_ADDR addr)
146 {
147 const int num_buckets = ARRAY_SIZE (inf_ttrace_page_dict.buckets);
148 const int pagesize = inf_ttrace_page_dict.pagesize;
149 int bucket;
150 struct inf_ttrace_page *page;
151
152 bucket = (addr / pagesize) % num_buckets;
153 page = &inf_ttrace_page_dict.buckets[bucket];
154 while (page)
155 {
156 if (page->addr == addr)
157 break;
158
159 page = page->next;
160 }
161
162 return page;
163 }
164
165 /* Add the page at address ADDR for process PID to the dictionary. */
166
167 static struct inf_ttrace_page *
168 inf_ttrace_add_page (pid_t pid, CORE_ADDR addr)
169 {
170 const int num_buckets = ARRAY_SIZE (inf_ttrace_page_dict.buckets);
171 const int pagesize = inf_ttrace_page_dict.pagesize;
172 int bucket;
173 struct inf_ttrace_page *page;
174 struct inf_ttrace_page *prev = NULL;
175
176 bucket = (addr / pagesize) % num_buckets;
177 page = &inf_ttrace_page_dict.buckets[bucket];
178 while (page)
179 {
180 if (page->addr == addr)
181 break;
182
183 prev = page;
184 page = page->next;
185 }
186
187 if (!page)
188 {
189 int prot;
190
191 if (ttrace (TT_PROC_GET_MPROTECT, pid, 0,
192 addr, 0, (uintptr_t)&prot) == -1)
193 perror_with_name (("ttrace"));
194
195 page = XNEW (struct inf_ttrace_page);
196 page->addr = addr;
197 page->prot = prot;
198 page->refcount = 0;
199 page->next = NULL;
200
201 page->prev = prev;
202 prev->next = page;
203
204 inf_ttrace_page_dict.count++;
205 if (inf_ttrace_page_dict.count == 1)
206 inf_ttrace_enable_syscall_events (pid);
207
208 if (inf_ttrace_num_lwps_in_syscall == 0)
209 {
210 if (ttrace (TT_PROC_SET_MPROTECT, pid, 0,
211 addr, pagesize, prot & ~PROT_WRITE) == -1)
212 perror_with_name (("ttrace"));
213 }
214 }
215
216 return page;
217 }
218
219 /* Insert the page at address ADDR of process PID into the dictionary. */
220
221 static void
222 inf_ttrace_insert_page (pid_t pid, CORE_ADDR addr)
223 {
224 struct inf_ttrace_page *page;
225
226 page = inf_ttrace_get_page (pid, addr);
227 if (!page)
228 page = inf_ttrace_add_page (pid, addr);
229
230 page->refcount++;
231 }
232
233 /* Remove the page at address ADDR of process PID from the dictionary. */
234
235 static void
236 inf_ttrace_remove_page (pid_t pid, CORE_ADDR addr)
237 {
238 const int pagesize = inf_ttrace_page_dict.pagesize;
239 struct inf_ttrace_page *page;
240
241 page = inf_ttrace_get_page (pid, addr);
242 page->refcount--;
243
244 gdb_assert (page->refcount >= 0);
245
246 if (page->refcount == 0)
247 {
248 if (inf_ttrace_num_lwps_in_syscall == 0)
249 {
250 if (ttrace (TT_PROC_SET_MPROTECT, pid, 0,
251 addr, pagesize, page->prot) == -1)
252 perror_with_name (("ttrace"));
253 }
254
255 inf_ttrace_page_dict.count--;
256 if (inf_ttrace_page_dict.count == 0)
257 inf_ttrace_disable_syscall_events (pid);
258
259 page->prev->next = page->next;
260 if (page->next)
261 page->next->prev = page->prev;
262
263 xfree (page);
264 }
265 }
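/* Reference-count sketch: two watchpoints that cover the same page each
   call inf_ttrace_insert_page, leaving the entry with refcount == 2
   while the page is write-protected only once; the saved protection is
   restored (and the entry freed) only when the second watchpoint is
   removed and the count drops back to zero.  */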
266
267 /* Mask the bits in PROT from the page protections that are currently
268 in the dictionary for process PID. */
269
270 static void
271 inf_ttrace_mask_page_protections (pid_t pid, int prot)
272 {
273 const int num_buckets = ARRAY_SIZE (inf_ttrace_page_dict.buckets);
274 const int pagesize = inf_ttrace_page_dict.pagesize;
275 int bucket;
276
277 for (bucket = 0; bucket < num_buckets; bucket++)
278 {
279 struct inf_ttrace_page *page;
280
281 page = inf_ttrace_page_dict.buckets[bucket].next;
282 while (page)
283 {
284 if (ttrace (TT_PROC_SET_MPROTECT, pid, 0,
285 page->addr, pagesize, page->prot & ~prot) == -1)
286 perror_with_name (("ttrace"));
287
288 page = page->next;
289 }
290 }
291 }
292
293 /* Write-protect the pages in the dictionary for process PID. */
294
295 static void
296 inf_ttrace_enable_page_protections (pid_t pid)
297 {
298 inf_ttrace_mask_page_protections (pid, PROT_WRITE);
299 }
300
301 /* Restore the protection of the pages in the dictionary for process
302 PID. */
303
304 static void
305 inf_ttrace_disable_page_protections (pid_t pid)
306 {
307 inf_ttrace_mask_page_protections (pid, 0);
308 }
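/* Note on the two wrappers above: masking with PROT_WRITE clears write
   access on every page in the dictionary, while masking with 0 rewrites
   each page with its saved protection unchanged (prot & ~0 == prot),
   i.e. it restores the original protections.  */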
309
310 /* Insert a "hardware" watchpoint for LEN bytes at address ADDR of
311 type TYPE. */
312
313 static int
314 inf_ttrace_insert_watchpoint (struct target_ops *self,
315 CORE_ADDR addr, int len, int type,
316 struct expression *cond)
317 {
318 const int pagesize = inf_ttrace_page_dict.pagesize;
319 pid_t pid = ptid_get_pid (inferior_ptid);
320 CORE_ADDR page_addr;
321 int num_pages;
322 int page;
323
324 gdb_assert (type == hw_write);
325
326 page_addr = (addr / pagesize) * pagesize;
327 num_pages = (len + pagesize - 1) / pagesize;
328
329 for (page = 0; page < num_pages; page++, page_addr += pagesize)
330 inf_ttrace_insert_page (pid, page_addr);
331
332 return 1;
333 }
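/* Worked example of the page arithmetic above, assuming a 4096-byte
   page size: watching 8 bytes at 0x2010 gives

       page_addr = (0x2010 / 4096) * 4096 == 0x2000;
       num_pages = (8 + 4096 - 1) / 4096 == 1;

   so only the page at 0x2000 is write-protected.  Note that NUM_PAGES
   is derived from LEN alone; a region that straddles a page boundary
   can touch one more page than this computes.  */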
334
335 /* Remove a "hardware" watchpoint for LEN bytes at address ADDR of
336 type TYPE. */
337
338 static int
339 inf_ttrace_remove_watchpoint (struct target_ops *self,
340 CORE_ADDR addr, int len, int type,
341 struct expression *cond)
342 {
343 const int pagesize = inf_ttrace_page_dict.pagesize;
344 pid_t pid = ptid_get_pid (inferior_ptid);
345 CORE_ADDR page_addr;
346 int num_pages;
347 int page;
348
349 gdb_assert (type == hw_write);
350
351 page_addr = (addr / pagesize) * pagesize;
352 num_pages = (len + pagesize - 1) / pagesize;
353
354 for (page = 0; page < num_pages; page++, page_addr += pagesize)
355 inf_ttrace_remove_page (pid, page_addr);
356
357 return 1;
358 }
359
360 static int
361 inf_ttrace_can_use_hw_breakpoint (struct target_ops *self,
362 int type, int len, int ot)
363 {
364 return (type == bp_hardware_watchpoint);
365 }
366
367 static int
368 inf_ttrace_region_ok_for_hw_watchpoint (struct target_ops *self,
369 CORE_ADDR addr, int len)
370 {
371 return 1;
372 }
373
374 /* Return non-zero if the current inferior was (potentially) stopped
375 by hitting a "hardware" watchpoint. */
376
377 static int
378 inf_ttrace_stopped_by_watchpoint (struct target_ops *ops)
379 {
380 pid_t pid = ptid_get_pid (inferior_ptid);
381 lwpid_t lwpid = ptid_get_lwp (inferior_ptid);
382 ttstate_t tts;
383
384 if (inf_ttrace_page_dict.count > 0)
385 {
386 if (ttrace (TT_LWP_GET_STATE, pid, lwpid,
387 (uintptr_t)&tts, sizeof tts, 0) == -1)
388 perror_with_name (("ttrace"));
389
390 if (tts.tts_event == TTEVT_SIGNAL
391 && tts.tts_u.tts_signal.tts_signo == SIGBUS)
392 {
393 const int pagesize = inf_ttrace_page_dict.pagesize;
394 void *addr = tts.tts_u.tts_signal.tts_siginfo.si_addr;
395 CORE_ADDR page_addr = ((uintptr_t)addr / pagesize) * pagesize;
396
397 if (inf_ttrace_get_page (pid, page_addr))
398 return 1;
399 }
400 }
401
402 return 0;
403 }
404 \f
405
406 /* When tracking a vfork(2), we cannot detach from the parent until
407 after the child has called exec(3) or has exited. If we are still
408 attached to the parent, this variable will be set to the process ID
409 of the parent. Otherwise it will be set to -1. */
410 static pid_t inf_ttrace_vfork_ppid = -1;
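/* Sketch of the vfork sequence handled below: the TTEVT_VFORK event is
   reported for the child first.  If GDB follows the child,
   inf_ttrace_vfork_ppid records the parent's pid instead of detaching
   right away; the parent reports its own TTEVT_VFORK only once the
   child has exec'd or exited, at which point inf_ttrace_wait (or
   detach/kill) issues TT_PROC_DETACH for the saved pid and resets this
   variable to -1.  */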
411
412 static int
413 inf_ttrace_follow_fork (struct target_ops *ops, int follow_child,
414 int detach_fork)
415 {
416 pid_t pid, fpid;
417 lwpid_t lwpid, flwpid;
418 ttstate_t tts;
419 struct thread_info *tp = inferior_thread ();
420
421 gdb_assert (tp->pending_follow.kind == TARGET_WAITKIND_FORKED
422 || tp->pending_follow.kind == TARGET_WAITKIND_VFORKED);
423
424 pid = ptid_get_pid (inferior_ptid);
425 lwpid = ptid_get_lwp (inferior_ptid);
426
427 /* Get all important details that core GDB doesn't (and shouldn't)
428 know about. */
429 if (ttrace (TT_LWP_GET_STATE, pid, lwpid,
430 (uintptr_t)&tts, sizeof tts, 0) == -1)
431 perror_with_name (("ttrace"));
432
433 gdb_assert (tts.tts_event == TTEVT_FORK || tts.tts_event == TTEVT_VFORK);
434
435 if (tts.tts_u.tts_fork.tts_isparent)
436 {
437 pid = tts.tts_pid;
438 lwpid = tts.tts_lwpid;
439 fpid = tts.tts_u.tts_fork.tts_fpid;
440 flwpid = tts.tts_u.tts_fork.tts_flwpid;
441 }
442 else
443 {
444 pid = tts.tts_u.tts_fork.tts_fpid;
445 lwpid = tts.tts_u.tts_fork.tts_flwpid;
446 fpid = tts.tts_pid;
447 flwpid = tts.tts_lwpid;
448 }
449
450 if (follow_child)
451 {
452 struct inferior *inf;
453 struct inferior *parent_inf;
454
455 parent_inf = find_inferior_pid (pid);
456
457 inferior_ptid = ptid_build (fpid, flwpid, 0);
458 inf = add_inferior (fpid);
459 inf->attach_flag = parent_inf->attach_flag;
460 inf->pspace = parent_inf->pspace;
461 inf->aspace = parent_inf->aspace;
462 copy_terminal_info (inf, parent_inf);
463 detach_breakpoints (ptid_build (pid, lwpid, 0));
464
465 target_terminal_ours ();
466 fprintf_unfiltered (gdb_stdlog,
467 _("Attaching after fork to child process %ld.\n"),
468 (long)fpid);
469 }
470 else
471 {
472 inferior_ptid = ptid_build (pid, lwpid, 0);
473 /* Detach any remaining breakpoints in the child. In the case
474 of fork events, we do not need to do this, because breakpoints
475 should have already been removed earlier. */
476 if (tts.tts_event == TTEVT_VFORK)
477 detach_breakpoints (ptid_build (fpid, flwpid, 0));
478
479 target_terminal_ours ();
480 fprintf_unfiltered (gdb_stdlog,
481 _("Detaching after fork from child process %ld.\n"),
482 (long)fpid);
483 }
484
485 if (tts.tts_event == TTEVT_VFORK)
486 {
487 gdb_assert (!tts.tts_u.tts_fork.tts_isparent);
488
489 if (follow_child)
490 {
491 /* We can't detach from the parent yet. */
492 inf_ttrace_vfork_ppid = pid;
493
494 reattach_breakpoints (fpid);
495 }
496 else
497 {
498 if (ttrace (TT_PROC_DETACH, fpid, 0, 0, 0, 0) == -1)
499 perror_with_name (("ttrace"));
500
501 /* Wait till we get the TTEVT_VFORK event in the parent.
502 This indicates that the child has called exec(3) or has
503 exited and that the parent is ready to be traced again. */
504 if (ttrace_wait (pid, lwpid, TTRACE_WAITOK, &tts, sizeof tts) == -1)
505 perror_with_name (("ttrace_wait"));
506 gdb_assert (tts.tts_event == TTEVT_VFORK);
507 gdb_assert (tts.tts_u.tts_fork.tts_isparent);
508
509 reattach_breakpoints (pid);
510 }
511 }
512 else
513 {
514 gdb_assert (tts.tts_u.tts_fork.tts_isparent);
515
516 if (follow_child)
517 {
518 if (ttrace (TT_PROC_DETACH, pid, 0, 0, 0, 0) == -1)
519 perror_with_name (("ttrace"));
520 }
521 else
522 {
523 if (ttrace (TT_PROC_DETACH, fpid, 0, 0, 0, 0) == -1)
524 perror_with_name (("ttrace"));
525 }
526 }
527
528 if (follow_child)
529 {
530 struct thread_info *ti;
531
532 /* The child will start out single-threaded. */
533 inf_ttrace_num_lwps = 1;
534 inf_ttrace_num_lwps_in_syscall = 0;
535
536 /* Delete parent. */
537 delete_thread_silent (ptid_build (pid, lwpid, 0));
538 detach_inferior (pid);
539
540 /* Add child thread. inferior_ptid was already set above. */
541 ti = add_thread_silent (inferior_ptid);
542 ti->private =
543 xmalloc (sizeof (struct inf_ttrace_private_thread_info));
544 memset (ti->private, 0,
545 sizeof (struct inf_ttrace_private_thread_info));
546 }
547
548 return 0;
549 }
550 \f
551
552 /* File descriptors for pipes used as semaphores during initial
553 startup of an inferior. */
554 static int inf_ttrace_pfd1[2];
555 static int inf_ttrace_pfd2[2];
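/* Sketch of the startup handshake implemented by inf_ttrace_me and
   inf_ttrace_him below, using the two pipes as one-byte semaphores:

       child (inf_ttrace_me)               parent (inf_ttrace_him)
         ttrace (TT_PROC_SETTRC, ...)
         write (inf_ttrace_pfd1[1])   --->   read (inf_ttrace_pfd1[0])
                                             ttrace (TT_PROC_SET_EVENT_MASK)
         read (inf_ttrace_pfd2[0])    <---   write (inf_ttrace_pfd2[1])
         exec the program

   ensuring the event mask is installed before the inferior execs.  */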
556
557 static void
558 do_cleanup_pfds (void *dummy)
559 {
560 close (inf_ttrace_pfd1[0]);
561 close (inf_ttrace_pfd1[1]);
562 close (inf_ttrace_pfd2[0]);
563 close (inf_ttrace_pfd2[1]);
564
565 unmark_fd_no_cloexec (inf_ttrace_pfd1[0]);
566 unmark_fd_no_cloexec (inf_ttrace_pfd1[1]);
567 unmark_fd_no_cloexec (inf_ttrace_pfd2[0]);
568 unmark_fd_no_cloexec (inf_ttrace_pfd2[1]);
569 }
570
571 static void
572 inf_ttrace_prepare (void)
573 {
574 if (pipe (inf_ttrace_pfd1) == -1)
575 perror_with_name (("pipe"));
576
577 if (pipe (inf_ttrace_pfd2) == -1)
578 {
579 close (inf_ttrace_pfd1[0]);
580 close (inf_ttrace_pfd1[1]);
581 perror_with_name (("pipe"));
582 }
583
584 mark_fd_no_cloexec (inf_ttrace_pfd1[0]);
585 mark_fd_no_cloexec (inf_ttrace_pfd1[1]);
586 mark_fd_no_cloexec (inf_ttrace_pfd2[0]);
587 mark_fd_no_cloexec (inf_ttrace_pfd2[1]);
588 }
589
590 /* Prepare to be traced. */
591
592 static void
593 inf_ttrace_me (void)
594 {
595 struct cleanup *old_chain = make_cleanup (do_cleanup_pfds, 0);
596 char c;
597
598 /* "Trace me, Dr. Memory!" */
599 if (ttrace (TT_PROC_SETTRC, 0, 0, 0, TT_VERSION, 0) == -1)
600 perror_with_name (("ttrace"));
601
602 /* Tell our parent that we are ready to be traced. */
603 if (write (inf_ttrace_pfd1[1], &c, sizeof c) != sizeof c)
604 perror_with_name (("write"));
605
606 /* Wait until our parent has set the initial event mask. */
607 if (read (inf_ttrace_pfd2[0], &c, sizeof c) != sizeof c)
608 perror_with_name (("read"));
609
610 do_cleanups (old_chain);
611 }
612
613 /* Start tracing PID. */
614
615 static void
616 inf_ttrace_him (struct target_ops *ops, int pid)
617 {
618 struct cleanup *old_chain = make_cleanup (do_cleanup_pfds, 0);
619 ttevent_t tte;
620 char c;
621
622 /* Wait until our child is ready to be traced. */
623 if (read (inf_ttrace_pfd1[0], &c, sizeof c) != sizeof c)
624 perror_with_name (("read"));
625
626 /* Set the initial event mask. */
627 memset (&tte, 0, sizeof (tte));
628 tte.tte_events |= TTEVT_EXEC | TTEVT_EXIT | TTEVT_FORK | TTEVT_VFORK;
629 tte.tte_events |= TTEVT_LWP_CREATE | TTEVT_LWP_EXIT | TTEVT_LWP_TERMINATE;
630 #ifdef TTEVT_BPT_SSTEP
631 tte.tte_events |= TTEVT_BPT_SSTEP;
632 #endif
633 tte.tte_opts |= TTEO_PROC_INHERIT;
634 if (ttrace (TT_PROC_SET_EVENT_MASK, pid, 0,
635 (uintptr_t)&tte, sizeof tte, 0) == -1)
636 perror_with_name (("ttrace"));
637
638 /* Tell our child that we have set the initial event mask. */
639 if (write (inf_ttrace_pfd2[1], &c, sizeof c) != sizeof c)
640 perror_with_name (("write"));
641
642 do_cleanups (old_chain);
643
644 if (!target_is_pushed (ops))
645 push_target (ops);
646
647 startup_inferior (START_INFERIOR_TRAPS_EXPECTED);
648
649 /* On some targets, there must be some explicit actions taken after
650 the inferior has been started up. */
651 target_post_startup_inferior (pid_to_ptid (pid));
652 }
653
654 static void
655 inf_ttrace_create_inferior (struct target_ops *ops, char *exec_file,
656 char *allargs, char **env, int from_tty)
657 {
658 int pid;
659
660 gdb_assert (inf_ttrace_num_lwps == 0);
661 gdb_assert (inf_ttrace_num_lwps_in_syscall == 0);
662 gdb_assert (inf_ttrace_page_dict.count == 0);
663 gdb_assert (inf_ttrace_reenable_page_protections == 0);
664 gdb_assert (inf_ttrace_vfork_ppid == -1);
665
666 pid = fork_inferior (exec_file, allargs, env, inf_ttrace_me, NULL,
667 inf_ttrace_prepare, NULL, NULL);
668
669 inf_ttrace_him (ops, pid);
670 }
671
672 static void
673 inf_ttrace_mourn_inferior (struct target_ops *ops)
674 {
675 const int num_buckets = ARRAY_SIZE (inf_ttrace_page_dict.buckets);
676 int bucket;
677
678 inf_ttrace_num_lwps = 0;
679 inf_ttrace_num_lwps_in_syscall = 0;
680
681 for (bucket = 0; bucket < num_buckets; bucket++)
682 {
683 struct inf_ttrace_page *page;
684 struct inf_ttrace_page *next;
685
686 page = inf_ttrace_page_dict.buckets[bucket].next;
687 while (page)
688 {
689 next = page->next;
690 xfree (page);
691 page = next;
692 }
693 }
694 inf_ttrace_page_dict.count = 0;
695
696 inf_child_mourn_inferior (ops);
697 }
698
699 /* Assuming we just attached the debugger to a new inferior, create
700 a new thread_info structure for each thread, and add it to our
701 list of threads. */
702
703 static void
704 inf_ttrace_create_threads_after_attach (int pid)
705 {
706 int status;
707 ptid_t ptid;
708 ttstate_t tts;
709 struct thread_info *ti;
710
711 status = ttrace (TT_PROC_GET_FIRST_LWP_STATE, pid, 0,
712 (uintptr_t) &tts, sizeof (ttstate_t), 0);
713 if (status < 0)
714 perror_with_name (_("TT_PROC_GET_FIRST_LWP_STATE ttrace call failed"));
715 gdb_assert (tts.tts_pid == pid);
716
717 /* Add the stopped thread. */
718 ptid = ptid_build (pid, tts.tts_lwpid, 0);
719 ti = add_thread (ptid);
720 ti->private = xzalloc (sizeof (struct inf_ttrace_private_thread_info));
721 inf_ttrace_num_lwps++;
722
723 /* We use the "first stopped thread" as the currently active thread. */
724 inferior_ptid = ptid;
725
726 /* Iterate over all the remaining threads. */
727
728 for (;;)
729 {
730 ptid_t ptid;
731
732 status = ttrace (TT_PROC_GET_NEXT_LWP_STATE, pid, 0,
733 (uintptr_t) &tts, sizeof (ttstate_t), 0);
734 if (status < 0)
735 perror_with_name (_("TT_PROC_GET_NEXT_LWP_STATE ttrace call failed"));
736 if (status == 0)
737 break; /* End of list. */
738
739 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
740 ti = add_thread (ptid);
741 ti->private = xzalloc (sizeof (struct inf_ttrace_private_thread_info));
742 inf_ttrace_num_lwps++;
743 }
744 }
745
746 static void
747 inf_ttrace_attach (struct target_ops *ops, const char *args, int from_tty)
748 {
749 char *exec_file;
750 pid_t pid;
751 ttevent_t tte;
752 struct inferior *inf;
753
754 pid = parse_pid_to_attach (args);
755
756 if (pid == getpid ()) /* Refuse to debug ourselves. */
757 error (_("I refuse to debug myself!"));
758
759 if (from_tty)
760 {
761 exec_file = get_exec_file (0);
762
763 if (exec_file)
764 printf_unfiltered (_("Attaching to program: %s, %s\n"), exec_file,
765 target_pid_to_str (pid_to_ptid (pid)));
766 else
767 printf_unfiltered (_("Attaching to %s\n"),
768 target_pid_to_str (pid_to_ptid (pid)));
769
770 gdb_flush (gdb_stdout);
771 }
772
773 gdb_assert (inf_ttrace_num_lwps == 0);
774 gdb_assert (inf_ttrace_num_lwps_in_syscall == 0);
775 gdb_assert (inf_ttrace_vfork_ppid == -1);
776
777 if (ttrace (TT_PROC_ATTACH, pid, 0, TT_KILL_ON_EXIT, TT_VERSION, 0) == -1)
778 perror_with_name (("ttrace"));
779
780 inf = current_inferior ();
781 inferior_appeared (inf, pid);
782 inf->attach_flag = 1;
783
784 /* Set the initial event mask. */
785 memset (&tte, 0, sizeof (tte));
786 tte.tte_events |= TTEVT_EXEC | TTEVT_EXIT | TTEVT_FORK | TTEVT_VFORK;
787 tte.tte_events |= TTEVT_LWP_CREATE | TTEVT_LWP_EXIT | TTEVT_LWP_TERMINATE;
788 #ifdef TTEVT_BPT_SSTEP
789 tte.tte_events |= TTEVT_BPT_SSTEP;
790 #endif
791 tte.tte_opts |= TTEO_PROC_INHERIT;
792 if (ttrace (TT_PROC_SET_EVENT_MASK, pid, 0,
793 (uintptr_t)&tte, sizeof tte, 0) == -1)
794 perror_with_name (("ttrace"));
795
796 if (!target_is_pushed (ops))
797 push_target (ops);
798
799 inf_ttrace_create_threads_after_attach (pid);
800 }
801
802 static void
803 inf_ttrace_detach (struct target_ops *ops, const char *args, int from_tty)
804 {
805 pid_t pid = ptid_get_pid (inferior_ptid);
806 int sig = 0;
807
808 if (from_tty)
809 {
810 char *exec_file = get_exec_file (0);
811 if (exec_file == 0)
812 exec_file = "";
813 printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file,
814 target_pid_to_str (pid_to_ptid (pid)));
815 gdb_flush (gdb_stdout);
816 }
817 if (args)
818 sig = atoi (args);
819
820 /* ??? The HP-UX 11.0 ttrace(2) manual page doesn't mention that we
821 can pass a signal number here. Does this really work? */
822 if (ttrace (TT_PROC_DETACH, pid, 0, 0, sig, 0) == -1)
823 perror_with_name (("ttrace"));
824
825 if (inf_ttrace_vfork_ppid != -1)
826 {
827 if (ttrace (TT_PROC_DETACH, inf_ttrace_vfork_ppid, 0, 0, 0, 0) == -1)
828 perror_with_name (("ttrace"));
829 inf_ttrace_vfork_ppid = -1;
830 }
831
832 inf_ttrace_num_lwps = 0;
833 inf_ttrace_num_lwps_in_syscall = 0;
834
835 inferior_ptid = null_ptid;
836 detach_inferior (pid);
837
838 inf_child_maybe_unpush_target (ops);
839 }
840
841 static void
842 inf_ttrace_kill (struct target_ops *ops)
843 {
844 pid_t pid = ptid_get_pid (inferior_ptid);
845
846 if (pid == 0)
847 return;
848
849 if (ttrace (TT_PROC_EXIT, pid, 0, 0, 0, 0) == -1)
850 perror_with_name (("ttrace"));
851 /* ??? Is it necessary to call ttrace_wait() here? */
852
853 if (inf_ttrace_vfork_ppid != -1)
854 {
855 if (ttrace (TT_PROC_DETACH, inf_ttrace_vfork_ppid, 0, 0, 0, 0) == -1)
856 perror_with_name (("ttrace"));
857 inf_ttrace_vfork_ppid = -1;
858 }
859
860 target_mourn_inferior ();
861 }
862
863 /* Check if a dying thread is dead by now, and delete it from GDB's
864 thread list if so. */
865 static int
866 inf_ttrace_delete_dead_threads_callback (struct thread_info *info, void *arg)
867 {
868 lwpid_t lwpid;
869 struct inf_ttrace_private_thread_info *p;
870
871 if (is_exited (info->ptid))
872 return 0;
873
874 lwpid = ptid_get_lwp (info->ptid);
875 p = (struct inf_ttrace_private_thread_info *) info->private;
876
877 /* Check if an lwp that was dying is still there or not. */
878 if (p->dying && (kill (lwpid, 0) == -1))
879 /* It's gone now. */
880 delete_thread (info->ptid);
881
882 return 0;
883 }
884
885 /* Resume the lwp pointed to by INFO, with REQUEST, and pass it signal
886 SIG. */
887
888 static void
889 inf_ttrace_resume_lwp (struct thread_info *info, ttreq_t request, int sig)
890 {
891 pid_t pid = ptid_get_pid (info->ptid);
892 lwpid_t lwpid = ptid_get_lwp (info->ptid);
893
894 if (ttrace (request, pid, lwpid, TT_NOPC, sig, 0) == -1)
895 {
896 struct inf_ttrace_private_thread_info *p
897 = (struct inf_ttrace_private_thread_info *) info->private;
898 if (p->dying && errno == EPROTO)
899 /* This is expected, it means the dying lwp is really gone
900 by now. If ttrace had an event to inform the debugger
901 the lwp is really gone, this wouldn't be needed. */
902 delete_thread (info->ptid);
903 else
904 /* This was really unexpected. */
905 perror_with_name (("ttrace"));
906 }
907 }
908
909 /* Callback for iterate_over_threads. */
910
911 static int
912 inf_ttrace_resume_callback (struct thread_info *info, void *arg)
913 {
914 if (!ptid_equal (info->ptid, inferior_ptid) && !is_exited (info->ptid))
915 inf_ttrace_resume_lwp (info, TT_LWP_CONTINUE, 0);
916
917 return 0;
918 }
919
920 static void
921 inf_ttrace_resume (struct target_ops *ops,
922 ptid_t ptid, int step, enum gdb_signal signal)
923 {
924 int resume_all;
925 ttreq_t request = step ? TT_LWP_SINGLE : TT_LWP_CONTINUE;
926 int sig = gdb_signal_to_host (signal);
927 struct thread_info *info;
928
929 /* A specific PTID means `step only this process id'. */
930 resume_all = (ptid_equal (ptid, minus_one_ptid));
931
932 /* If resuming all threads, it's the current thread that should be
933 handled specially. */
934 if (resume_all)
935 ptid = inferior_ptid;
936
937 info = find_thread_ptid (ptid);
938 inf_ttrace_resume_lwp (info, request, sig);
939
940 if (resume_all)
941 /* Let all the other threads run too. */
942 iterate_over_threads (inf_ttrace_resume_callback, NULL);
943 }
944
945 static ptid_t
946 inf_ttrace_wait (struct target_ops *ops,
947 ptid_t ptid, struct target_waitstatus *ourstatus, int options)
948 {
949 pid_t pid = ptid_get_pid (ptid);
950 lwpid_t lwpid = ptid_get_lwp (ptid);
951 ttstate_t tts;
952 struct thread_info *ti;
953 ptid_t related_ptid;
954
955 /* Until proven otherwise. */
956 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
957
958 if (pid == -1)
959 pid = lwpid = 0;
960
961 gdb_assert (pid != 0 || lwpid == 0);
962
963 do
964 {
965 set_sigint_trap ();
966
967 if (ttrace_wait (pid, lwpid, TTRACE_WAITOK, &tts, sizeof tts) == -1)
968 perror_with_name (("ttrace_wait"));
969
970 if (tts.tts_event == TTEVT_VFORK && tts.tts_u.tts_fork.tts_isparent)
971 {
972 if (inf_ttrace_vfork_ppid != -1)
973 {
974 gdb_assert (inf_ttrace_vfork_ppid == tts.tts_pid);
975
976 if (ttrace (TT_PROC_DETACH, tts.tts_pid, 0, 0, 0, 0) == -1)
977 perror_with_name (("ttrace"));
978 inf_ttrace_vfork_ppid = -1;
979 }
980
981 tts.tts_event = TTEVT_NONE;
982 }
983
984 clear_sigint_trap ();
985 }
986 while (tts.tts_event == TTEVT_NONE);
987
988 /* Now that we've waited, we can re-enable the page protections. */
989 if (inf_ttrace_reenable_page_protections)
990 {
991 gdb_assert (inf_ttrace_num_lwps_in_syscall == 0);
992 inf_ttrace_enable_page_protections (tts.tts_pid);
993 inf_ttrace_reenable_page_protections = 0;
994 }
995
996 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
997
998 if (inf_ttrace_num_lwps == 0)
999 {
1000 struct thread_info *ti;
1001
1002 inf_ttrace_num_lwps = 1;
1003
1004 /* This is the earliest we hear about the lwp member of
1005 INFERIOR_PTID, after an attach or fork_inferior. */
1006 gdb_assert (ptid_get_lwp (inferior_ptid) == 0);
1007
1008 /* We haven't set the private member on the main thread yet. Do
1009 it now. */
1010 ti = find_thread_ptid (inferior_ptid);
1011 gdb_assert (ti != NULL && ti->private == NULL);
1012 ti->private =
1013 xmalloc (sizeof (struct inf_ttrace_private_thread_info));
1014 memset (ti->private, 0,
1015 sizeof (struct inf_ttrace_private_thread_info));
1016
1017 /* Notify the core that this ptid changed. This changes
1018 inferior_ptid as well. */
1019 thread_change_ptid (inferior_ptid, ptid);
1020 }
1021
1022 switch (tts.tts_event)
1023 {
1024 #ifdef TTEVT_BPT_SSTEP
1025 case TTEVT_BPT_SSTEP:
1026 /* Make it look like a breakpoint. */
1027 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1028 ourstatus->value.sig = GDB_SIGNAL_TRAP;
1029 break;
1030 #endif
1031
1032 case TTEVT_EXEC:
1033 ourstatus->kind = TARGET_WAITKIND_EXECD;
1034 ourstatus->value.execd_pathname =
1035 xmalloc (tts.tts_u.tts_exec.tts_pathlen + 1);
1036 if (ttrace (TT_PROC_GET_PATHNAME, tts.tts_pid, 0,
1037 (uintptr_t)ourstatus->value.execd_pathname,
1038 tts.tts_u.tts_exec.tts_pathlen, 0) == -1)
1039 perror_with_name (("ttrace"));
1040 ourstatus->value.execd_pathname[tts.tts_u.tts_exec.tts_pathlen] = 0;
1041
1042 /* At this point, all inserted breakpoints are gone. Marking them
1043 out as soon as we detect an exec prevents a later breakpoint
1044 removal from writing its saved "shadow contents" back to memory
1045 to lift the bp. That shadow is NOT valid after an exec. */
1046 mark_breakpoints_out ();
1047 break;
1048
1049 case TTEVT_EXIT:
1050 store_waitstatus (ourstatus, tts.tts_u.tts_exit.tts_exitcode);
1051 inf_ttrace_num_lwps = 0;
1052 break;
1053
1054 case TTEVT_FORK:
1055 related_ptid = ptid_build (tts.tts_u.tts_fork.tts_fpid,
1056 tts.tts_u.tts_fork.tts_flwpid, 0);
1057
1058 ourstatus->kind = TARGET_WAITKIND_FORKED;
1059 ourstatus->value.related_pid = related_ptid;
1060
1061 /* Make sure the other end of the fork is stopped too. */
1062 if (ttrace_wait (tts.tts_u.tts_fork.tts_fpid,
1063 tts.tts_u.tts_fork.tts_flwpid,
1064 TTRACE_WAITOK, &tts, sizeof tts) == -1)
1065 perror_with_name (("ttrace_wait"));
1066
1067 gdb_assert (tts.tts_event == TTEVT_FORK);
1068 if (tts.tts_u.tts_fork.tts_isparent)
1069 {
1070 related_ptid = ptid_build (tts.tts_u.tts_fork.tts_fpid,
1071 tts.tts_u.tts_fork.tts_flwpid, 0);
1072 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
1073 ourstatus->value.related_pid = related_ptid;
1074 }
1075 break;
1076
1077 case TTEVT_VFORK:
1078 gdb_assert (!tts.tts_u.tts_fork.tts_isparent);
1079
1080 related_ptid = ptid_build (tts.tts_u.tts_fork.tts_fpid,
1081 tts.tts_u.tts_fork.tts_flwpid, 0);
1082
1083 ourstatus->kind = TARGET_WAITKIND_VFORKED;
1084 ourstatus->value.related_pid = related_ptid;
1085
1086 /* HACK: To avoid touching the parent during the vfork, switch
1087 away from it. */
1088 inferior_ptid = ptid;
1089 break;
1090
1091 case TTEVT_LWP_CREATE:
1092 lwpid = tts.tts_u.tts_thread.tts_target_lwpid;
1093 ptid = ptid_build (tts.tts_pid, lwpid, 0);
1094 ti = add_thread (ptid);
1095 ti->private =
1096 xmalloc (sizeof (struct inf_ttrace_private_thread_info));
1097 memset (ti->private, 0,
1098 sizeof (struct inf_ttrace_private_thread_info));
1099 inf_ttrace_num_lwps++;
1100 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
1101 /* Let the lwp_create-caller thread continue. */
1102 ttrace (TT_LWP_CONTINUE, ptid_get_pid (ptid),
1103 ptid_get_lwp (ptid), TT_NOPC, 0, 0);
1104 /* Return without stopping the whole process. */
1105 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1106 return ptid;
1107
1108 case TTEVT_LWP_EXIT:
1109 if (print_thread_events)
1110 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (ptid));
1111 ti = find_thread_ptid (ptid);
1112 gdb_assert (ti != NULL);
1113 ((struct inf_ttrace_private_thread_info *)ti->private)->dying = 1;
1114 inf_ttrace_num_lwps--;
1115 /* Let the thread really exit. */
1116 ttrace (TT_LWP_CONTINUE, ptid_get_pid (ptid),
1117 ptid_get_lwp (ptid), TT_NOPC, 0, 0);
1118 /* Return without stopping the whole process. */
1119 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1120 return ptid;
1121
1122 case TTEVT_LWP_TERMINATE:
1123 lwpid = tts.tts_u.tts_thread.tts_target_lwpid;
1124 ptid = ptid_build (tts.tts_pid, lwpid, 0);
1125 if (print_thread_events)
1126 printf_unfiltered (_("[%s has been terminated]\n"),
1127 target_pid_to_str (ptid));
1128 ti = find_thread_ptid (ptid);
1129 gdb_assert (ti != NULL);
1130 ((struct inf_ttrace_private_thread_info *)ti->private)->dying = 1;
1131 inf_ttrace_num_lwps--;
1132
1133 /* Resume the lwp_terminate-caller thread. */
1134 ptid = ptid_build (tts.tts_pid, tts.tts_lwpid, 0);
1135 ttrace (TT_LWP_CONTINUE, ptid_get_pid (ptid),
1136 ptid_get_lwp (ptid), TT_NOPC, 0, 0);
1137 /* Return without stopping the whole process. */
1138 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1139 return ptid;
1140
1141 case TTEVT_SIGNAL:
1142 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1143 ourstatus->value.sig =
1144 gdb_signal_from_host (tts.tts_u.tts_signal.tts_signo);
1145 break;
1146
1147 case TTEVT_SYSCALL_ENTRY:
1148 gdb_assert (inf_ttrace_reenable_page_protections == 0);
1149 inf_ttrace_num_lwps_in_syscall++;
1150 if (inf_ttrace_num_lwps_in_syscall == 1)
1151 {
1152 /* A thread has just entered a system call. Disable any
1153 page protections as the kernel can't deal with them. */
1154 inf_ttrace_disable_page_protections (tts.tts_pid);
1155 }
1156 ourstatus->kind = TARGET_WAITKIND_SYSCALL_ENTRY;
1157 ourstatus->value.syscall_number = tts.tts_scno;
1158 break;
1159
1160 case TTEVT_SYSCALL_RETURN:
1161 if (inf_ttrace_num_lwps_in_syscall > 0)
1162 {
1163 /* If the last thread has just left the system call, this
1164 would be a logical place to re-enable the page
1165 protections, but that doesn't work. We can't re-enable
1166 them until we've done another wait. */
1167 inf_ttrace_reenable_page_protections =
1168 (inf_ttrace_num_lwps_in_syscall == 1);
1169 inf_ttrace_num_lwps_in_syscall--;
1170 }
1171 ourstatus->kind = TARGET_WAITKIND_SYSCALL_RETURN;
1172 ourstatus->value.syscall_number = tts.tts_scno;
1173 break;
1174
1175 default:
1176 gdb_assert (!"Unexpected ttrace event");
1177 break;
1178 }
1179
1180 /* Make sure all threads within the process are stopped. */
1181 if (ttrace (TT_PROC_STOP, tts.tts_pid, 0, 0, 0, 0) == -1)
1182 perror_with_name (("ttrace"));
1183
1184 /* Now that the whole process is stopped, check if any dying thread
1185 is really dead by now. If a dying thread is still alive, it will
1186 be stopped too, and will still show up in `info threads', tagged
1187 with "(Exiting)". We could make `info threads' prune dead
1188 threads instead via inf_ttrace_thread_alive, but doing this here
1189 has the advantage that a frontend is notified sooner of thread
1190 exits. Note that a dying lwp is still alive, it still has to be
1191 resumed, like any other lwp. */
1192 iterate_over_threads (inf_ttrace_delete_dead_threads_callback, NULL);
1193
1194 return ptid;
1195 }
1196
1197 /* Transfer LEN bytes from ADDR in the inferior's memory into READBUF,
1198 and transfer LEN bytes from WRITEBUF into the inferior's memory at
1199 ADDR. Either READBUF or WRITEBUF may be null, in which case the
1200 corresponding transfer doesn't happen. Return the number of bytes
1201 actually transferred (which may be zero if an error occurs). */
1202
1203 static LONGEST
1204 inf_ttrace_xfer_memory (CORE_ADDR addr, ULONGEST len,
1205 void *readbuf, const void *writebuf)
1206 {
1207 pid_t pid = ptid_get_pid (inferior_ptid);
1208
1209 /* HP-UX treats text space and data space differently. GDB, however,
1210 doesn't really know the difference. Therefore we try both. Try
1211 text space before data space though because when we're writing
1212 into text space the instruction cache might need to be flushed. */
1213
1214 if (readbuf
1215 && ttrace (TT_PROC_RDTEXT, pid, 0, addr, len, (uintptr_t)readbuf) == -1
1216 && ttrace (TT_PROC_RDDATA, pid, 0, addr, len, (uintptr_t)readbuf) == -1)
1217 return 0;
1218
1219 if (writebuf
1220 && ttrace (TT_PROC_WRTEXT, pid, 0, addr, len, (uintptr_t)writebuf) == -1
1221 && ttrace (TT_PROC_WRDATA, pid, 0, addr, len, (uintptr_t)writebuf) == -1)
1222 return 0;
1223
1224 return len;
1225 }
1226
1227 static enum target_xfer_status
1228 inf_ttrace_xfer_partial (struct target_ops *ops, enum target_object object,
1229 const char *annex, gdb_byte *readbuf,
1230 const gdb_byte *writebuf,
1231 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
1232 {
1233 switch (object)
1234 {
1235 case TARGET_OBJECT_MEMORY:
1236 {
1237 LONGEST val = inf_ttrace_xfer_memory (offset, len, readbuf, writebuf);
1238
1239 if (val == 0)
1240 return TARGET_XFER_EOF;
1241 else
1242 {
1243 *xfered_len = (ULONGEST) val;
1244 return TARGET_XFER_OK;
1245 }
1246 }
1247
1248 case TARGET_OBJECT_UNWIND_TABLE:
1249 return TARGET_XFER_E_IO;
1250
1251 case TARGET_OBJECT_AUXV:
1252 return TARGET_XFER_E_IO;
1253
1254 case TARGET_OBJECT_WCOOKIE:
1255 return TARGET_XFER_E_IO;
1256
1257 default:
1258 return TARGET_XFER_E_IO;
1259 }
1260 }
1261
1262 /* Print status information about what we're accessing. */
1263
1264 static void
1265 inf_ttrace_files_info (struct target_ops *ignore)
1266 {
1267 struct inferior *inf = current_inferior ();
1268 printf_filtered (_("\tUsing the running image of %s %s.\n"),
1269 inf->attach_flag ? "attached" : "child",
1270 target_pid_to_str (inferior_ptid));
1271 }
1272
1273 static int
1274 inf_ttrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1275 {
1276 return 1;
1277 }
1278
1279 /* Return a string describing the state of the thread specified by
1280 INFO. */
1281
1282 static char *
1283 inf_ttrace_extra_thread_info (struct target_ops *self,
1284 struct thread_info *info)
1285 {
1286 struct inf_ttrace_private_thread_info* private =
1287 (struct inf_ttrace_private_thread_info *) info->private;
1288
1289 if (private != NULL && private->dying)
1290 return "Exiting";
1291
1292 return NULL;
1293 }
1294
1295 static char *
1296 inf_ttrace_pid_to_str (struct target_ops *ops, ptid_t ptid)
1297 {
1298 pid_t pid = ptid_get_pid (ptid);
1299 lwpid_t lwpid = ptid_get_lwp (ptid);
1300 static char buf[128];
1301
1302 if (lwpid == 0)
1303 xsnprintf (buf, sizeof buf, "process %ld",
1304 (long) pid);
1305 else
1306 xsnprintf (buf, sizeof buf, "process %ld, lwp %ld",
1307 (long) pid, (long) lwpid);
1308 return buf;
1309 }
1310 \f
1311
1312 /* Implement the get_ada_task_ptid target_ops method. */
1313
1314 static ptid_t
1315 inf_ttrace_get_ada_task_ptid (struct target_ops *self, long lwp, long thread)
1316 {
1317 return ptid_build (ptid_get_pid (inferior_ptid), lwp, 0);
1318 }
1319
1320 \f
1321 struct target_ops *
1322 inf_ttrace_target (void)
1323 {
1324 struct target_ops *t = inf_child_target ();
1325
1326 t->to_attach = inf_ttrace_attach;
1327 t->to_detach = inf_ttrace_detach;
1328 t->to_resume = inf_ttrace_resume;
1329 t->to_wait = inf_ttrace_wait;
1330 t->to_files_info = inf_ttrace_files_info;
1331 t->to_can_use_hw_breakpoint = inf_ttrace_can_use_hw_breakpoint;
1332 t->to_insert_watchpoint = inf_ttrace_insert_watchpoint;
1333 t->to_remove_watchpoint = inf_ttrace_remove_watchpoint;
1334 t->to_stopped_by_watchpoint = inf_ttrace_stopped_by_watchpoint;
1335 t->to_region_ok_for_hw_watchpoint =
1336 inf_ttrace_region_ok_for_hw_watchpoint;
1337 t->to_kill = inf_ttrace_kill;
1338 t->to_create_inferior = inf_ttrace_create_inferior;
1339 t->to_follow_fork = inf_ttrace_follow_fork;
1340 t->to_mourn_inferior = inf_ttrace_mourn_inferior;
1341 t->to_thread_alive = inf_ttrace_thread_alive;
1342 t->to_extra_thread_info = inf_ttrace_extra_thread_info;
1343 t->to_pid_to_str = inf_ttrace_pid_to_str;
1344 t->to_xfer_partial = inf_ttrace_xfer_partial;
1345 t->to_get_ada_task_ptid = inf_ttrace_get_ada_task_ptid;
1346
1347 return t;
1348 }
1349 #endif
1350 \f
1351
1352 /* Prevent warning from -Wmissing-prototypes. */
1353 void _initialize_inf_ttrace (void);
1354
1355 void
1356 _initialize_inf_ttrace (void)
1357 {
1358 #ifdef HAVE_TTRACE
1359 inf_ttrace_page_dict.pagesize = getpagesize ();
1360 #endif
1361 }