* linux-low.c (get_stop_pc): Print pc if debug_threads.
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22 #include "ansidecl.h" /* For ATTRIBUTE_PACKED, must be bug in external.h. */
23 #include "elf/common.h"
24 #include "elf/external.h"
25
26 #include <sys/wait.h>
27 #include <stdio.h>
28 #include <sys/param.h>
29 #include <sys/ptrace.h>
30 #include <signal.h>
31 #include <sys/ioctl.h>
32 #include <fcntl.h>
33 #include <string.h>
34 #include <stdlib.h>
35 #include <unistd.h>
36 #include <errno.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43
44 #ifndef PTRACE_GETSIGINFO
45 # define PTRACE_GETSIGINFO 0x4202
46 # define PTRACE_SETSIGINFO 0x4203
47 #endif
48
49 #ifndef O_LARGEFILE
50 #define O_LARGEFILE 0
51 #endif
52
53 /* If the system headers did not provide the constants, hard-code the normal
54 values. */
55 #ifndef PTRACE_EVENT_FORK
56
57 #define PTRACE_SETOPTIONS 0x4200
58 #define PTRACE_GETEVENTMSG 0x4201
59
60 /* options set using PTRACE_SETOPTIONS */
61 #define PTRACE_O_TRACESYSGOOD 0x00000001
62 #define PTRACE_O_TRACEFORK 0x00000002
63 #define PTRACE_O_TRACEVFORK 0x00000004
64 #define PTRACE_O_TRACECLONE 0x00000008
65 #define PTRACE_O_TRACEEXEC 0x00000010
66 #define PTRACE_O_TRACEVFORKDONE 0x00000020
67 #define PTRACE_O_TRACEEXIT 0x00000040
68
69 /* Wait extended result codes for the above trace options. */
70 #define PTRACE_EVENT_FORK 1
71 #define PTRACE_EVENT_VFORK 2
72 #define PTRACE_EVENT_CLONE 3
73 #define PTRACE_EVENT_EXEC 4
74 #define PTRACE_EVENT_VFORK_DONE 5
75 #define PTRACE_EVENT_EXIT 6
76
77 #endif /* PTRACE_EVENT_FORK */
78
79 /* We can't always assume that this flag is available, but all systems
80 with the ptrace event handlers also have __WALL, so it's safe to use
81 in some contexts. */
82 #ifndef __WALL
83 #define __WALL 0x40000000 /* Wait for any child. */
84 #endif
85
86 #ifdef __UCLIBC__
87 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
88 #define HAS_NOMMU
89 #endif
90 #endif
91
92 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
93 representation of the thread ID.
94
95 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
96 the same as the LWP ID.
97
98 ``all_processes'' is keyed by the "overall process ID", which
99 GNU/Linux calls tgid, "thread group ID". */
100
101 struct inferior_list all_lwps;
102
103 /* A list of all unknown processes which receive stop signals. Some other
104 process will presumably claim each of these as forked children
105 momentarily. */
106
107 struct inferior_list stopped_pids;
108
109 /* FIXME this is a bit of a hack, and could be removed. */
110 int stopping_threads;
111
112 /* FIXME make into a target method? */
113 int using_threads = 1;
114
115 /* This flag is true iff we've just created or attached to our first
116 inferior but it has not stopped yet. As soon as it does, we need
117 to call the low target's arch_setup callback. Doing this only on
118 the first inferior avoids reinializing the architecture on every
119 inferior, and avoids messing with the register caches of the
120 already running inferiors. NOTE: this assumes all inferiors under
121 control of gdbserver have the same architecture. */
122 static int new_inferior;
123
124 static void linux_resume_one_lwp (struct lwp_info *lwp,
125 int step, int signal, siginfo_t *info);
126 static void linux_resume (struct thread_resume *resume_info, size_t n);
127 static void stop_all_lwps (void);
128 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
129 static int check_removed_breakpoint (struct lwp_info *event_child);
130 static void *add_lwp (ptid_t ptid);
131 static int my_waitpid (int pid, int *status, int flags);
132 static int linux_stopped_by_watchpoint (void);
133 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
134
135 struct pending_signals
136 {
137 int signal;
138 siginfo_t info;
139 struct pending_signals *prev;
140 };
141
142 #define PTRACE_ARG3_TYPE long
143 #define PTRACE_XFER_TYPE long
144
145 #ifdef HAVE_LINUX_REGSETS
146 static char *disabled_regsets;
147 static int num_regsets;
148 #endif
149
150 /* The read/write ends of the pipe registered as waitable file in the
151 event loop. */
152 static int linux_event_pipe[2] = { -1, -1 };
153
154 /* True if we're currently in async mode. */
155 #define target_is_async_p() (linux_event_pipe[0] != -1)
156
157 static void send_sigstop (struct inferior_list_entry *entry);
158 static void wait_for_sigstop (struct inferior_list_entry *entry);
159
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.

   If /proc/PID/exe is a readable symlink, its target is returned;
   otherwise the "/proc/PID/exe" path string itself is returned.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);

  /* Read at most MAXPATHLEN - 1 bytes: readlink does not append a
     terminating NUL, and a link target of exactly MAXPATHLEN bytes
     would otherwise leave NAME2 unterminated despite the memset.  */
  if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}
185
186 /* Return non-zero if HEADER is a 64-bit ELF file. */
187
188 static int
189 elf_64_header_p (const Elf64_External_Ehdr *header)
190 {
191 return (header->e_ident[EI_MAG0] == ELFMAG0
192 && header->e_ident[EI_MAG1] == ELFMAG1
193 && header->e_ident[EI_MAG2] == ELFMAG2
194 && header->e_ident[EI_MAG3] == ELFMAG3
195 && header->e_ident[EI_CLASS] == ELFCLASS64);
196 }
197
198 /* Return non-zero if FILE is a 64-bit ELF file,
199 zero if the file is not a 64-bit ELF file,
200 and -1 if the file is not accessible or doesn't exist. */
201
202 int
203 elf_64_file_p (const char *file)
204 {
205 Elf64_External_Ehdr header;
206 int fd;
207
208 fd = open (file, O_RDONLY);
209 if (fd < 0)
210 return -1;
211
212 if (read (fd, &header, sizeof (header)) != sizeof (header))
213 {
214 close (fd);
215 return 0;
216 }
217 close (fd);
218
219 return elf_64_header_p (&header);
220 }
221
/* Remove LWP from the thread list and the global lwp list, then
   free its storage.  The thread is removed first, while LWP is
   still valid.  */

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp);
}
229
/* Add a process to the common process list, and set its private
   data.  Returns the new process_info.  PID is the overall process
   ID (tgid); ATTACHED is nonzero if we attached rather than forked
   the process ourselves.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  The
     actual arch_setup call is deferred to the first wait, once the
     inferior has stopped (see new_inferior's comment above).  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  /* Private data starts out zeroed.  */
  proc->private = xcalloc (1, sizeof (*proc->private));

  return proc;
}
247
/* Remove a process from the common process list,
   also freeing all private data.  Counterpart of
   linux_add_process.  */

static void
linux_remove_process (struct process_info *process)
{
  free (process->private);
  remove_process (process);
}
257
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  EVENT_CHILD is the lwp that reported the
   event; WSTAT is its raw wait status, whose high bits carry the
   ptrace event code.  Other event codes are silently ignored.  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      /* The event message holds the LWP id of the new clone.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      /* Make sure clones of the clone are also traced.  */
      ptrace (PTRACE_SETOPTIONS, new_pid, 0, PTRACE_O_TRACECLONE);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads)
	    new_lwp->stopped = 1;
	  else
	    ptrace (PTRACE_CONT, new_pid, 0, 0);
	}
      else
	{
	  /* The SIGSTOP is still pending; remember to swallow it
	     when it eventually arrives.  */
	  new_lwp->stop_expected = 1;
	  if (stopping_threads)
	    {
	      new_lwp->stopped = 1;
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    ptrace (PTRACE_CONT, new_pid, 0, WSTOPSIG (status));
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
329
330 /* This function should only be called if the process got a SIGTRAP.
331 The SIGTRAP could mean several things.
332
333 On i386, where decr_pc_after_break is non-zero:
334 If we were single-stepping this process using PTRACE_SINGLESTEP,
335 we will get only the one SIGTRAP (even if the instruction we
336 stepped over was a breakpoint). The value of $eip will be the
337 next instruction.
338 If we continue the process using PTRACE_CONT, we will get a
339 SIGTRAP when we hit a breakpoint. The value of $eip will be
340 the instruction after the breakpoint (i.e. needs to be
341 decremented). If we report the SIGTRAP to GDB, we must also
342 report the undecremented PC. If we cancel the SIGTRAP, we
343 must resume at the decremented PC.
344
345 (Presumably, not yet tested) On a non-decr_pc_after_break machine
346 with hardware or kernel single-step:
347 If we single-step over a breakpoint instruction, our PC will
348 point at the following instruction. If we continue and hit a
349 breakpoint instruction, our PC will point at the breakpoint
350 instruction. */
351
/* Return the PC at which the current inferior stopped, compensating
   for decr_pc_after_break when the stop was not a single-step (see
   the large comment above for the rationale).  Reads registers of
   current_inferior, so the caller must have selected the right
   thread first.  */

static CORE_ADDR
get_stop_pc (void)
{
  CORE_ADDR stop_pc = (*the_low_target.get_pc) ();

  /* Single-step traps leave the PC at the next instruction already;
     only a breakpoint hit via PTRACE_CONT needs the adjustment.  */
  if (! get_thread_lwp (current_inferior)->stepping)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
365
366 static void *
367 add_lwp (ptid_t ptid)
368 {
369 struct lwp_info *lwp;
370
371 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
372 memset (lwp, 0, sizeof (*lwp));
373
374 lwp->head.id = ptid;
375
376 add_inferior_to_list (&all_lwps, &lwp->head);
377
378 return lwp;
379 }
380
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args.
   Calls perror_with_name (and does not return) if the fork fails;
   a failed exec makes the child exit with status 0177.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* No-MMU systems cannot fork; vfork is the only option.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: request tracing before exec so the new image stops
	 immediately.  */
      ptrace (PTRACE_TRACEME, 0, 0, 0);

      /* Restore the default disposition of this realtime signal —
	 presumably one reserved by the thread library; verify against
	 gdbserver's own signal setup.  */
      signal (__SIGRTMIN + 1, SIG_DFL);

      /* Put the child in its own process group, detaching it from
	 gdbserver's terminal job control.  */
      setpgid (0, 0);

      execv (program, allargs);
      if (errno == ENOENT)
	/* Fall back to a PATH search if PROGRAM is not a path.  */
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  linux_add_process (pid, 0);

  /* The initial thread's LWP id equals the process id.  */
  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  /* PTRACE_SETOPTIONS must wait until the first stop.  */
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
426
/* Attach to an inferior process.  Attach to LWPID with PTRACE_ATTACH
   and register it in the lwp and thread lists.  INITIAL is nonzero
   when this is the first LWP of a process we are attaching to (in
   which case an attach failure is a hard error and the ptid is built
   from LWPID alone); otherwise a failure only produces a warning and
   the process id is taken from current_inferior.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}
      else
	/* If we fail to attach to a process, report an error.  */
	error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
	       strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might have not been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);


  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the process.
	This is handled below by setting stop_expected = 1.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach clear stop_expected after
	we return.
	??? If the process already has several threads we leave the other
	threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  if (! stopping_threads)
    new_lwp->stop_expected = 1;
}
505
/* Public entry point: attach to LWPID as a non-initial thread of an
   already-attached process.  */

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
511
/* Attach to process PID (its initial thread) and add it to the
   process list.  Returns 0.  On attach failure linux_attach_lwp_1
   raises an error and this function does not return.  */

int
linux_attach (unsigned long pid)
{
  struct lwp_info *lwp;

  linux_attach_lwp_1 (pid, 1);

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
						  ptid_build (pid, pid, 0));
      lwp->stop_expected = 0;
    }

  return 0;
}
532
/* Callback state for second_thread_of_pid_p: the process id to match
   and a running count of its threads seen so far.  */

struct counter
{
  int pid;    /* Process id whose threads are being counted.  */
  int count;  /* Number of matching threads seen so far.  */
};
538
539 static int
540 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
541 {
542 struct counter *counter = args;
543
544 if (ptid_get_pid (entry->id) == counter->pid)
545 {
546 if (++counter->count > 1)
547 return 1;
548 }
549
550 return 0;
551 }
552
553 static int
554 last_thread_of_process_p (struct thread_info *thread)
555 {
556 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
557 int pid = ptid_get_pid (ptid);
558 struct counter counter = { pid , 0 };
559
560 return (find_inferior (&all_threads,
561 second_thread_of_pid_p, &counter) == NULL);
562 }
563
/* Kill the inferior lwp.  find_inferior callback: kill the LWP in
   ENTRY if it belongs to the process whose pid is pointed to by
   ARGS — except the process's last thread, which is deliberately
   left for linux_kill to handle.  Always returns 0 so the walk
   visits every thread.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (last_thread_of_process_p (thread))
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      /* Note PID is reused here as the wait result; the original
	 process id is no longer needed at this point.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}
605
/* Kill process PID: first every secondary LWP (via
   linux_kill_one_lwp), then the main thread, then remove the process
   from our lists.  Returns 0 on success, -1 if PID is unknown.  */

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  /* NOTE(review): THREAD is assigned but not used below.  */
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
	     lwpid_of (lwp), pid);

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  if (!lwp->stopped)
    send_sigstop (&lwp->head);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

  delete_lwp (lwp);
  linux_remove_process (process);
  return 0;
}
647
/* find_inferior callback: detach from the LWP in ENTRY if it belongs
   to the process whose pid is pointed to by ARGS.  Stops the LWP if
   needed, flushes pending SIGSTOPs and register writes, then
   PTRACE_DETACHes and deletes it.  Always returns 0 so the walk
   visits every thread.  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If we're detaching from a running inferior, make sure it is
     stopped first, as PTRACE_DETACH will not work otherwise.  */
  if (!lwp->stopped)
    {
      int lwpid = lwpid_of (lwp);

      stopping_threads = 1;
      send_sigstop (&lwp->head);

      /* If this detects a new thread through a clone event, the new
	 thread is appended to the end of the lwp list, so we'll
	 eventually detach from it.  */
      wait_for_sigstop (&lwp->head);
      stopping_threads = 0;

      /* If LWP exits while we're trying to stop it, there's nothing
	 left to do.  */
      lwp = find_lwp_pid (pid_to_ptid (lwpid));
      if (lwp == NULL)
	return 0;
    }

  /* Make sure the process isn't stopped at a breakpoint that's
     no longer there.  */
  check_removed_breakpoint (lwp);

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      if (lwp->stopped)
	linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}
707
708 static int
709 any_thread_of (struct inferior_list_entry *entry, void *args)
710 {
711 int *pid_p = args;
712
713 if (ptid_get_pid (entry->id) == *pid_p)
714 return 1;
715
716 return 0;
717 }
718
/* Detach from process PID: remove all breakpoints, detach every LWP
   of the process, and remove the process from our lists.  Returns 0
   on success, -1 if PID is unknown.  */

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Select some thread of the process so register and breakpoint
     operations below act on the right inferior.  */
  current_inferior =
    (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);

  delete_all_breakpoints ();
  find_inferior (&all_threads, linux_detach_one_lwp, &pid);
  linux_remove_process (process);
  return 0;
}
736
737 static void
738 linux_join (int pid)
739 {
740 int status, ret;
741 struct process_info *process;
742
743 process = find_process_pid (pid);
744 if (process == NULL)
745 return;
746
747 do {
748 ret = my_waitpid (pid, &status, 0);
749 if (WIFEXITED (status) || WIFSIGNALED (status))
750 break;
751 } while (ret != -1 || errno != ECHILD);
752 }
753
754 /* Return nonzero if the given thread is still alive. */
755 static int
756 linux_thread_alive (ptid_t ptid)
757 {
758 struct lwp_info *lwp = find_lwp_pid (ptid);
759
760 /* We assume we always know if a thread exits. If a whole process
761 exited but we still haven't been able to report it to GDB, we'll
762 hold on to the last lwp of the dead process. */
763 if (lwp != NULL)
764 return !lwp->dead;
765 else
766 return 0;
767 }
768
/* Return nonzero if this process stopped at a breakpoint which
   no longer appears to be inserted.  Also adjust the PC
   appropriately to resume where the breakpoint used to be.
   Temporarily switches current_inferior to EVENT_CHILD's thread and
   restores it before returning.  When it returns 1, the pending
   SIGTRAP status has been consumed.  */
static int
check_removed_breakpoint (struct lwp_info *event_child)
{
  CORE_ADDR stop_pc;
  struct thread_info *saved_inferior;

  /* Nothing to do unless the pending stop was a breakpoint hit.  */
  if (event_child->pending_is_breakpoint == 0)
    return 0;

  if (debug_threads)
    fprintf (stderr, "Checking for breakpoint in lwp %ld.\n",
	     lwpid_of (event_child));

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (event_child);

  stop_pc = get_stop_pc ();

  /* If the PC has changed since we stopped, then we shouldn't do
     anything.  This happens if, for instance, GDB handled the
     decr_pc_after_break subtraction itself.  */
  if (stop_pc != event_child->pending_stop_pc)
    {
      if (debug_threads)
	fprintf (stderr, "Ignoring, PC was changed.  Old PC was 0x%08llx\n",
		 event_child->pending_stop_pc);

      event_child->pending_is_breakpoint = 0;
      current_inferior = saved_inferior;
      return 0;
    }

  /* If the breakpoint is still there, we will report hitting it.  */
  if ((*the_low_target.breakpoint_at) (stop_pc))
    {
      if (debug_threads)
	fprintf (stderr, "Ignoring, breakpoint is still present.\n");
      current_inferior = saved_inferior;
      return 0;
    }

  if (debug_threads)
    fprintf (stderr, "Removed breakpoint.\n");

  /* For decr_pc_after_break targets, here is where we perform the
     decrement.  We go immediately from this function to resuming,
     and can not safely call get_stop_pc () again.  */
  if (the_low_target.set_pc != NULL)
    {
      if (debug_threads)
	fprintf (stderr, "Set pc to 0x%lx\n", (long) stop_pc);
      (*the_low_target.set_pc) (stop_pc);
    }

  /* We consumed the pending SIGTRAP.  */
  event_child->pending_is_breakpoint = 0;
  event_child->status_pending_p = 0;
  event_child->status_pending = 0;

  current_inferior = saved_inferior;
  return 1;
}
834
/* Return 1 if this lwp has an interesting status pending.  This
   function may silently resume an inferior lwp.  find_inferior
   callback: ARG points at a ptid restricting which process's lwps
   are considered (minus_one_ptid means any).  */
static int
status_pending_p (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  if (lwp->status_pending_p && !lwp->suspended)
    if (check_removed_breakpoint (lwp))
      {
	/* This thread was stopped at a breakpoint, and the breakpoint
	   is now gone.  We were told to continue (or step...) all threads,
	   so GDB isn't trying to single-step past this breakpoint.
	   So instead of reporting the old SIGTRAP, pretend we got to
	   the breakpoint just after it was removed instead of just
	   before; resume the process.  */
	linux_resume_one_lwp (lwp, 0, 0, NULL);
	return 0;
      }

  /* Re-test: check_removed_breakpoint above may have cleared the
     pending status.  */
  return (lwp->status_pending_p && !lwp->suspended);
}
864
865 static int
866 same_lwp (struct inferior_list_entry *entry, void *data)
867 {
868 ptid_t ptid = *(ptid_t *) data;
869 int lwp;
870
871 if (ptid_get_lwp (ptid) != 0)
872 lwp = ptid_get_lwp (ptid);
873 else
874 lwp = ptid_get_pid (ptid);
875
876 if (ptid_get_lwp (entry->id) == lwp)
877 return 1;
878
879 return 0;
880 }
881
/* Find the lwp_info whose LWP id matches PTID (see same_lwp for the
   matching rule).  Returns NULL if no such lwp exists.  */

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}
887
/* Wait (with __WALL) for a stop or exit from PTID — or from any
   child if PTID is minus_one_ptid — storing the raw status in
   *WSTATP.  OPTIONS is passed through to waitpid.  Returns the
   lwp_info of the reporting child, marked stopped with its
   last_status recorded, or NULL if OPTIONS contains WNOHANG and
   nothing was ready.  Events from unknown processes are either
   saved in stopped_pids (expected fork children) or ignored, and
   the wait is retried.  Also performs the one-time arch_setup on
   the first stop of the first inferior.  */

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  /* Signals 32/33 are skipped here — presumably the realtime signals
     used internally by the thread library; confirm against the
     USE_THREAD_DB handling below.  */
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;
  child->pending_is_breakpoint = 0;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      CORE_ADDR pc;

      /* get_pc reads registers of current_inferior, so temporarily
	 switch to the child's thread.  */
      current_inferior = (struct thread_info *)
	find_inferior_id (&all_threads, child->head.id);
      pc = (*the_low_target.get_pc) ();
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}
967
968 /* Wait for an event from child PID. If PID is -1, wait for any
969 child. Store the stop status through the status pointer WSTAT.
970 OPTIONS is passed to the waitpid call. Return 0 if no child stop
971 event was found and OPTIONS contains WNOHANG. Return the PID of
972 the stopped child otherwise. */
973
974 static int
975 linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
976 {
977 CORE_ADDR stop_pc;
978 struct lwp_info *event_child = NULL;
979 int bp_status;
980 struct lwp_info *requested_child = NULL;
981
982 /* Check for a lwp with a pending status. */
983 /* It is possible that the user changed the pending task's registers since
984 it stopped. We correctly handle the change of PC if we hit a breakpoint
985 (in check_removed_breakpoint); signals should be reported anyway. */
986
987 if (ptid_equal (ptid, minus_one_ptid)
988 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
989 {
990 event_child = (struct lwp_info *)
991 find_inferior (&all_lwps, status_pending_p, &ptid);
992 if (debug_threads && event_child)
993 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
994 }
995 else
996 {
997 requested_child = find_lwp_pid (ptid);
998 if (requested_child->status_pending_p
999 && !check_removed_breakpoint (requested_child))
1000 event_child = requested_child;
1001 }
1002
1003 if (event_child != NULL)
1004 {
1005 if (debug_threads)
1006 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1007 lwpid_of (event_child), event_child->status_pending);
1008 *wstat = event_child->status_pending;
1009 event_child->status_pending_p = 0;
1010 event_child->status_pending = 0;
1011 current_inferior = get_lwp_thread (event_child);
1012 return lwpid_of (event_child);
1013 }
1014
1015 /* We only enter this loop if no process has a pending wait status. Thus
1016 any action taken in response to a wait status inside this loop is
1017 responding as soon as we detect the status, not after any pending
1018 events. */
1019 while (1)
1020 {
1021 event_child = linux_wait_for_lwp (ptid, wstat, options);
1022
1023 if ((options & WNOHANG) && event_child == NULL)
1024 return 0;
1025
1026 if (event_child == NULL)
1027 error ("event from unknown child");
1028
1029 current_inferior = get_lwp_thread (event_child);
1030
1031 /* Check for thread exit. */
1032 if (! WIFSTOPPED (*wstat))
1033 {
1034 if (debug_threads)
1035 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1036
1037 /* If the last thread is exiting, just return. */
1038 if (last_thread_of_process_p (current_inferior))
1039 {
1040 if (debug_threads)
1041 fprintf (stderr, "LWP %ld is last lwp of process\n",
1042 lwpid_of (event_child));
1043 return lwpid_of (event_child);
1044 }
1045
1046 delete_lwp (event_child);
1047
1048 if (!non_stop)
1049 {
1050 current_inferior = (struct thread_info *) all_threads.head;
1051 if (debug_threads)
1052 fprintf (stderr, "Current inferior is now %ld\n",
1053 lwpid_of (get_thread_lwp (current_inferior)));
1054 }
1055 else
1056 {
1057 current_inferior = NULL;
1058 if (debug_threads)
1059 fprintf (stderr, "Current inferior is now <NULL>\n");
1060 }
1061
1062 /* If we were waiting for this particular child to do something...
1063 well, it did something. */
1064 if (requested_child != NULL)
1065 return lwpid_of (event_child);
1066
1067 /* Wait for a more interesting event. */
1068 continue;
1069 }
1070
1071 if (event_child->must_set_ptrace_flags)
1072 {
1073 ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
1074 0, PTRACE_O_TRACECLONE);
1075 event_child->must_set_ptrace_flags = 0;
1076 }
1077
1078 if (WIFSTOPPED (*wstat)
1079 && WSTOPSIG (*wstat) == SIGSTOP
1080 && event_child->stop_expected)
1081 {
1082 if (debug_threads)
1083 fprintf (stderr, "Expected stop.\n");
1084 event_child->stop_expected = 0;
1085 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
1086 continue;
1087 }
1088
1089 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1090 && *wstat >> 16 != 0)
1091 {
1092 handle_extended_wait (event_child, *wstat);
1093 continue;
1094 }
1095
1096 /* If GDB is not interested in this signal, don't stop other
1097 threads, and don't report it to GDB. Just resume the
1098 inferior right away. We do this for threading-related
1099 signals as well as any that GDB specifically requested we
1100 ignore. But never ignore SIGSTOP if we sent it ourselves,
1101 and do not ignore signals when stepping - they may require
1102 special handling to skip the signal handler. */
1103 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
1104 thread library? */
1105 if (WIFSTOPPED (*wstat)
1106 && !event_child->stepping
1107 && (
1108 #ifdef USE_THREAD_DB
1109 (current_process ()->private->thread_db_active
1110 && (WSTOPSIG (*wstat) == __SIGRTMIN
1111 || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
1112 ||
1113 #endif
1114 (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
1115 && (WSTOPSIG (*wstat) != SIGSTOP || !stopping_threads))))
1116 {
1117 siginfo_t info, *info_p;
1118
1119 if (debug_threads)
1120 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
1121 WSTOPSIG (*wstat), lwpid_of (event_child));
1122
1123 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
1124 info_p = &info;
1125 else
1126 info_p = NULL;
1127 linux_resume_one_lwp (event_child,
1128 event_child->stepping,
1129 WSTOPSIG (*wstat), info_p);
1130 continue;
1131 }
1132
1133 /* If this event was not handled above, and is not a SIGTRAP, report
1134 it. */
1135 if (!WIFSTOPPED (*wstat) || WSTOPSIG (*wstat) != SIGTRAP)
1136 return lwpid_of (event_child);
1137
1138 /* If this target does not support breakpoints, we simply report the
1139 SIGTRAP; it's of no concern to us. */
1140 if (the_low_target.get_pc == NULL)
1141 return lwpid_of (event_child);
1142
1143 stop_pc = get_stop_pc ();
1144
1145 /* bp_reinsert will only be set if we were single-stepping.
1146 Notice that we will resume the process after hitting
1147 a gdbserver breakpoint; single-stepping to/over one
1148 is not supported (yet). */
1149 if (event_child->bp_reinsert != 0)
1150 {
1151 if (debug_threads)
1152 fprintf (stderr, "Reinserted breakpoint.\n");
1153 reinsert_breakpoint (event_child->bp_reinsert);
1154 event_child->bp_reinsert = 0;
1155
1156 /* Clear the single-stepping flag and SIGTRAP as we resume. */
1157 linux_resume_one_lwp (event_child, 0, 0, NULL);
1158 continue;
1159 }
1160
1161 bp_status = check_breakpoints (stop_pc);
1162
1163 if (bp_status != 0)
1164 {
1165 if (debug_threads)
1166 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
1167
1168 /* We hit one of our own breakpoints. We mark it as a pending
1169 breakpoint, so that check_removed_breakpoint () will do the PC
1170 adjustment for us at the appropriate time. */
1171 event_child->pending_is_breakpoint = 1;
1172 event_child->pending_stop_pc = stop_pc;
1173
1174 /* We may need to put the breakpoint back. We continue in the event
1175 loop instead of simply replacing the breakpoint right away,
1176 in order to not lose signals sent to the thread that hit the
1177 breakpoint. Unfortunately this increases the window where another
1178 thread could sneak past the removed breakpoint. For the current
1179 use of server-side breakpoints (thread creation) this is
1180 acceptable; but it needs to be considered before this breakpoint
1181 mechanism can be used in more general ways. For some breakpoints
1182 it may be necessary to stop all other threads, but that should
1183 be avoided where possible.
1184
1185 If breakpoint_reinsert_addr is NULL, that means that we can
1186 use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint,
1187 mark it for reinsertion, and single-step.
1188
1189 Otherwise, call the target function to figure out where we need
1190 our temporary breakpoint, create it, and continue executing this
1191 process. */
1192
1193 /* NOTE: we're lifting breakpoints in non-stop mode. This
1194 is currently only used for thread event breakpoints, so
1195 it isn't that bad as long as we have PTRACE_EVENT_CLONE
1196 events. */
1197 if (bp_status == 2)
1198 /* No need to reinsert. */
1199 linux_resume_one_lwp (event_child, 0, 0, NULL);
1200 else if (the_low_target.breakpoint_reinsert_addr == NULL)
1201 {
1202 event_child->bp_reinsert = stop_pc;
1203 uninsert_breakpoint (stop_pc);
1204 linux_resume_one_lwp (event_child, 1, 0, NULL);
1205 }
1206 else
1207 {
1208 reinsert_breakpoint_by_bp
1209 (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
1210 linux_resume_one_lwp (event_child, 0, 0, NULL);
1211 }
1212
1213 continue;
1214 }
1215
1216 if (debug_threads)
1217 fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");
1218
1219 /* If we were single-stepping, we definitely want to report the
1220 SIGTRAP. Although the single-step operation has completed,
1221 do not clear clear the stepping flag yet; we need to check it
1222 in wait_for_sigstop. */
1223 if (event_child->stepping)
1224 return lwpid_of (event_child);
1225
1226 /* A SIGTRAP that we can't explain. It may have been a breakpoint.
1227 Check if it is a breakpoint, and if so mark the process information
1228 accordingly. This will handle both the necessary fiddling with the
1229 PC on decr_pc_after_break targets and suppressing extra threads
1230 hitting a breakpoint if two hit it at once and then GDB removes it
1231 after the first is reported. Arguably it would be better to report
1232 multiple threads hitting breakpoints simultaneously, but the current
1233 remote protocol does not allow this. */
1234 if ((*the_low_target.breakpoint_at) (stop_pc))
1235 {
1236 event_child->pending_is_breakpoint = 1;
1237 event_child->pending_stop_pc = stop_pc;
1238 }
1239
1240 return lwpid_of (event_child);
1241 }
1242
1243 /* NOTREACHED */
1244 return 0;
1245 }
1246
1247 static int
1248 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1249 {
1250 ptid_t wait_ptid;
1251
1252 if (ptid_is_pid (ptid))
1253 {
1254 /* A request to wait for a specific tgid. This is not possible
1255 with waitpid, so instead, we wait for any child, and leave
1256 children we're not interested in right now with a pending
1257 status to report later. */
1258 wait_ptid = minus_one_ptid;
1259 }
1260 else
1261 wait_ptid = ptid;
1262
1263 while (1)
1264 {
1265 int event_pid;
1266
1267 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1268
1269 if (event_pid > 0
1270 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1271 {
1272 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1273
1274 if (! WIFSTOPPED (*wstat))
1275 mark_lwp_dead (event_child, *wstat);
1276 else
1277 {
1278 event_child->status_pending_p = 1;
1279 event_child->status_pending = *wstat;
1280 }
1281 }
1282 else
1283 return event_pid;
1284 }
1285 }
1286
/* Wait for process, returns status.  On return, *OURSTATUS describes
   the event and the returned ptid identifies the LWP it happened to.
   Returns null_ptid if TARGET_WNOHANG was requested and no event was
   pending.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
	      struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct thread_info *thread = NULL;
  struct lwp_info *lwp = NULL;
  int options;
  int pid;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

 retry:
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      thread = (struct thread_info *) find_inferior_id (&all_threads,
							cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = minus_one_ptid;
	  resume_info.kind = resume_continue;
	  resume_info.sig = 0;
	  linux_resume (&resume_info, 1);
	}
      else
	ptid = cont_thread;
    }

  pid = linux_wait_for_event (ptid, &w, options);
  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  /* linux_wait_for_event set current_inferior to the event thread.  */
  lwp = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
	{
	  /* NOTE: this PID intentionally shadows the outer one; for
	     the last thread of the process it is the pid we report.  */
	  int pid = pid_of (lwp);
	  struct process_info *process = find_process_pid (pid);

	  /* The whole process is gone; drop all bookkeeping for it.  */
	  delete_lwp (lwp);
	  linux_remove_process (process);

	  current_inferior = NULL;

	  if (WIFEXITED (w))
	    {
	      ourstatus->kind = TARGET_WAITKIND_EXITED;
	      ourstatus->value.integer = WEXITSTATUS (w);

	      if (debug_threads)
		fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
	    }
	  else
	    {
	      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	      ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

	      if (debug_threads)
		fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));

	    }

	  return pid_to_ptid (pid);
	}
    }
  else
    {
      /* A non-last thread exited; per the comment above, don't report
	 that to GDB — just wait for the next event.  */
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* In all-stop, stop all threads.  Be careful to only do this if
     we're about to report an event to GDB.  */
  if (!non_stop)
    stop_all_lwps ();

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  if (lwp->suspended && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (lwp->suspended && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  Set stop_expected so the
	 pending SIGSTOP is ignored and the LWP is resumed.  */
      lwp->stop_expected = 1;
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      /* An ordinary stop: report the signal that caused it.  */
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  if (debug_threads)
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
	     target_pid_to_str (lwp->head.id),
	     ourstatus->kind,
	     ourstatus->value.sig);

  return lwp->head.id;
}
1425
1426 /* Get rid of any pending event in the pipe. */
1427 static void
1428 async_file_flush (void)
1429 {
1430 int ret;
1431 char buf;
1432
1433 do
1434 ret = read (linux_event_pipe[0], &buf, 1);
1435 while (ret >= 0 || (ret == -1 && errno == EINTR));
1436 }
1437
1438 /* Put something in the pipe, so the event loop wakes up. */
1439 static void
1440 async_file_mark (void)
1441 {
1442 int ret;
1443
1444 async_file_flush ();
1445
1446 do
1447 ret = write (linux_event_pipe[1], "+", 1);
1448 while (ret == 0 || (ret == -1 && errno == EINTR));
1449
1450 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1451 be awakened anyway. */
1452 }
1453
1454 static ptid_t
1455 linux_wait (ptid_t ptid,
1456 struct target_waitstatus *ourstatus, int target_options)
1457 {
1458 ptid_t event_ptid;
1459
1460 if (debug_threads)
1461 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1462
1463 /* Flush the async file first. */
1464 if (target_is_async_p ())
1465 async_file_flush ();
1466
1467 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1468
1469 /* If at least one stop was reported, there may be more. A single
1470 SIGCHLD can signal more than one child stop. */
1471 if (target_is_async_p ()
1472 && (target_options & TARGET_WNOHANG) != 0
1473 && !ptid_equal (event_ptid, null_ptid))
1474 async_file_mark ();
1475
1476 return event_ptid;
1477 }
1478
/* Send a signal to an LWP.  For LinuxThreads, kill is enough; however, if
   thread groups are in use, we need to use tkill.  Falls back to kill
   permanently once tkill has reported ENOSYS.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  static int tkill_failed;

  errno = 0;

#ifdef SYS_tkill
  if (!tkill_failed)
    {
      int rc = syscall (SYS_tkill, lwpid, signo);

      if (errno != ENOSYS)
	return rc;

      /* This kernel has no tkill; remember that and do not try again.  */
      tkill_failed = 1;
      errno = 0;
    }
#endif

  return kill (lwpid, signo);
}
1502
1503 static void
1504 send_sigstop (struct inferior_list_entry *entry)
1505 {
1506 struct lwp_info *lwp = (struct lwp_info *) entry;
1507 int pid;
1508
1509 if (lwp->stopped)
1510 return;
1511
1512 pid = lwpid_of (lwp);
1513
1514 /* If we already have a pending stop signal for this process, don't
1515 send another. */
1516 if (lwp->stop_expected)
1517 {
1518 if (debug_threads)
1519 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
1520
1521 /* We clear the stop_expected flag so that wait_for_sigstop
1522 will receive the SIGSTOP event (instead of silently resuming and
1523 waiting again). It'll be reset below. */
1524 lwp->stop_expected = 0;
1525 return;
1526 }
1527
1528 if (debug_threads)
1529 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
1530
1531 kill_lwp (pid, SIGSTOP);
1532 }
1533
1534 static void
1535 mark_lwp_dead (struct lwp_info *lwp, int wstat)
1536 {
1537 /* It's dead, really. */
1538 lwp->dead = 1;
1539
1540 /* Store the exit status for later. */
1541 lwp->status_pending_p = 1;
1542 lwp->status_pending = wstat;
1543
1544 /* So that check_removed_breakpoint doesn't try to figure out if
1545 this is stopped at a breakpoint. */
1546 lwp->pending_is_breakpoint = 0;
1547
1548 /* Prevent trying to stop it. */
1549 lwp->stopped = 1;
1550
1551 /* No further stops are expected from a dead lwp. */
1552 lwp->stop_expected = 0;
1553 }
1554
/* for_each_inferior callback: block until the LWP in ENTRY (to which
   send_sigstop has already sent a SIGSTOP) reports a stop.  If it
   stopped with some other signal, stash that status to report later
   and remember that the SIGSTOP is still pending; if it exited, mark
   it dead instead.  */
static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;
  int wstat;
  ptid_t saved_tid;
  ptid_t ptid;

  if (lwp->stopped)
    return;

  /* linux_wait_for_event changes current_inferior.  Save it by id (so
     we can tell below whether it died while we waited) and restore it
     afterwards.  */
  saved_inferior = current_inferior;
  if (saved_inferior != NULL)
    saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  ptid = lwp->head.id;

  linux_wait_for_event (ptid, &wstat, __WALL);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP.  If the process exited, just
     return.  */
  if (WIFSTOPPED (wstat)
      && WSTOPSIG (wstat) != SIGSTOP)
    {
      if (debug_threads)
	fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
		 lwpid_of (lwp), wstat);

      /* Do not leave a pending single-step finish to be reported to
	 the client.  The client will give us a new action for this
	 thread, possibly a continue request --- otherwise, the client
	 would consider this pending SIGTRAP reported later a spurious
	 signal.  */
      if (WSTOPSIG (wstat) == SIGTRAP
	  && lwp->stepping
	  && !linux_stopped_by_watchpoint ())
	{
	  if (debug_threads)
	    fprintf (stderr, "  single-step SIGTRAP ignored\n");
	}
      else
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}
      /* The SIGSTOP we sent has not been consumed yet.  */
      lwp->stop_expected = 1;
    }
  else if (!WIFSTOPPED (wstat))
    {
      if (debug_threads)
	fprintf (stderr, "Process %ld exited while stopping LWPs\n",
		 lwpid_of (lwp));

      /* Leave this status pending for the next time we're able to
	 report it.  In the mean time, we'll report this lwp as dead
	 to GDB, so GDB doesn't try to read registers and memory from
	 it.  */
      mark_lwp_dead (lwp, wstat);
    }

  /* Restore the previously current thread, unless it died while we
     were waiting.  */
  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
	fprintf (stderr, "Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process.  */
	  current_inferior = NULL;
	}
      else
	{
	  /* Set a valid thread as current.  */
	  set_desired_inferior (0);
	}
    }
}
1640
/* Stop every LWP: first send each one a SIGSTOP, then wait until each
   has reported its stop.  stopping_threads is set for the duration so
   the event-handling code knows SIGSTOPs arriving now may be ours
   (see the pass_signals check in linux_wait_for_event_1, which never
   ignores SIGSTOP while stopping_threads is set).  */
static void
stop_all_lwps (void)
{
  stopping_threads = 1;
  for_each_inferior (&all_lwps, send_sigstop);
  for_each_inferior (&all_lwps, wait_for_sigstop);
  stopping_threads = 0;
}
1649
/* Resume execution of the inferior process.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal; INFO, if non-NULL, is
   the siginfo to deliver along with SIGNAL.  Does nothing if the LWP
   is not currently stopped.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  struct thread_info *saved_inferior;

  if (lwp->stopped == 0)
    return;

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (lwp->status_pending_p || lwp->pending_signals != NULL
	  || lwp->bp_reinsert != 0))
    {
      struct pending_signals *p_sig;
      /* Push onto the head of the pending-signals list (newest
	 first); delivery below walks to the tail, so order of
	 delivery is oldest first.  */
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
	memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
	memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  /* If a status is still pending after the breakpoint check, don't
     resume — the pending event will be reported instead.  */
  if (lwp->status_pending_p && !check_removed_breakpoint (lwp))
    return;

  /* Temporarily make LWP's thread current; the register and PC
     accesses below operate on the current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
	     lwpid_of (lwp), step ? "step" : "continue", signal,
	     lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	fprintf (stderr, "  pending reinsert at %08lx", (long)lwp->bp_reinsert);
      if (step == 0)
	fprintf (stderr, "BAD - reinserting but not stepping.\n");
      step = 1;

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  check_removed_breakpoint (lwp);

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      CORE_ADDR pc = (*the_low_target.get_pc) ();
      fprintf (stderr, "  resuming from pc 0x%lx\n", (long) pc);
    }

  /* If we have pending signals, consume one unless we are trying to reinsert
     a breakpoint.  */
  if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
    {
      struct pending_signals **p_sig;

      /* Walk to the tail of the list: the oldest queued signal.  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      /* A zero si_signo means no siginfo was captured for this
	 signal; don't overwrite the kernel's.  */
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  /* The LWP is about to run: any cached register values are stale.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));
  errno = 0;
  lwp->stopped = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0, signal);

  current_inferior = saved_inferior;
  if (errno)
    {
      /* ESRCH from ptrace either means that the thread was already
	 running (an error) or that it is gone (a race condition).  If
	 it's gone, we will get a notification the next time we wait,
	 so we can ignore the error.  We could differentiate these
	 two, but it's tricky without waiting; the thread still exists
	 as a zombie, so sending it signal 0 would succeed.  So just
	 ignore ESRCH.  */
      if (errno == ESRCH)
	return;

      perror_with_name ("ptrace");
    }
}
1763
/* A thread_resume array together with its element count, bundled so
   it can be passed through find_inferior's single void* argument to
   linux_set_resume_request.  */
struct thread_resume_array
{
  struct thread_resume *resume;	/* Array of resume requests.  */
  size_t n;			/* Number of elements in RESUME.  */
};
1769
1770 /* This function is called once per thread. We look up the thread
1771 in RESUME_PTR, and mark the thread with a pointer to the appropriate
1772 resume request.
1773
1774 This algorithm is O(threads * resume elements), but resume elements
1775 is small (and will remain small at least until GDB supports thread
1776 suspension). */
1777 static int
1778 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
1779 {
1780 struct lwp_info *lwp;
1781 struct thread_info *thread;
1782 int ndx;
1783 struct thread_resume_array *r;
1784
1785 thread = (struct thread_info *) entry;
1786 lwp = get_thread_lwp (thread);
1787 r = arg;
1788
1789 for (ndx = 0; ndx < r->n; ndx++)
1790 {
1791 ptid_t ptid = r->resume[ndx].thread;
1792 if (ptid_equal (ptid, minus_one_ptid)
1793 || ptid_equal (ptid, entry->id)
1794 || (ptid_is_pid (ptid)
1795 && (ptid_get_pid (ptid) == pid_of (lwp)))
1796 || (ptid_get_lwp (ptid) == -1
1797 && (ptid_get_pid (ptid) == pid_of (lwp))))
1798 {
1799 lwp->resume = &r->resume[ndx];
1800 return 0;
1801 }
1802 }
1803
1804 /* No resume action for this thread. */
1805 lwp->resume = NULL;
1806
1807 return 0;
1808 }
1809
1810
1811 /* Set *FLAG_P if this lwp has an interesting status pending. */
1812 static int
1813 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1814 {
1815 struct lwp_info *lwp = (struct lwp_info *) entry;
1816
1817 /* LWPs which will not be resumed are not interesting, because
1818 we might not wait for them next time through linux_wait. */
1819 if (lwp->resume == NULL)
1820 return 0;
1821
1822 /* If this thread has a removed breakpoint, we won't have any
1823 events to report later, so check now. check_removed_breakpoint
1824 may clear status_pending_p. We avoid calling check_removed_breakpoint
1825 for any thread that we are not otherwise going to resume - this
1826 lets us preserve stopped status when two threads hit a breakpoint.
1827 GDB removes the breakpoint to single-step a particular thread
1828 past it, then re-inserts it and resumes all threads. We want
1829 to report the second thread without resuming it in the interim. */
1830 if (lwp->status_pending_p)
1831 check_removed_breakpoint (lwp);
1832
1833 if (lwp->status_pending_p)
1834 * (int *) flag_p = 1;
1835
1836 return 0;
1837 }
1838
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int step;
  /* In all-stop, ARG already reflects whether *any* thread has a
     pending status; in non-stop it is refined per thread below.  */
  int pending_flag = * (int *) arg;

  thread = (struct thread_info *) entry;
  lwp = get_thread_lwp (thread);

  /* No resume request matched this thread; leave it as it is.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr, "suspending LWP %ld\n", lwpid_of (lwp));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    fprintf (stderr, "running -> suspending LWP %ld\n", lwpid_of (lwp));

	  lwp->suspended = 1;
	  send_sigstop (&lwp->head);
	}
      else
	{
	  if (debug_threads)
	    {
	      if (lwp->suspended)
		fprintf (stderr, "already stopped/suspended LWP %ld\n",
			 lwpid_of (lwp));
	      else
		fprintf (stderr, "already stopped/not suspended LWP %ld\n",
			 lwpid_of (lwp));
	    }

	  /* Make sure we leave the LWP suspended, so we don't try to
	     resume it without GDB telling us to.  FIXME: The LWP may
	     have been stopped in an internal event that was not meant
	     to be notified back to GDB (e.g., gdbserver breakpoint),
	     so we should be reporting a stop event in that case
	     too.  */
	  lwp->suspended = 1;
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      return 0;
    }
  else
    lwp->suspended = 0;

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  */
  if (non_stop)
    resume_status_pending_p (&lwp->head, &pending_flag);

  if (!pending_flag)
    {
      if (debug_threads)
	fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));

      /* Preserve stepping for a thread that was stopped mid-step by a
	 breakpoint, unless a thread-specific request says otherwise
	 (see the header comment).  */
      if (ptid_equal (lwp->resume->thread, minus_one_ptid)
	  && lwp->stepping
	  && lwp->pending_is_breakpoint)
	step = 1;
      else
	step = (lwp->resume->kind == resume_step);

      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  /* This request has been handled.  */
  lwp->resume = NULL;
  return 0;
}
1958
1959 static void
1960 linux_resume (struct thread_resume *resume_info, size_t n)
1961 {
1962 int pending_flag;
1963 struct thread_resume_array array = { resume_info, n };
1964
1965 find_inferior (&all_threads, linux_set_resume_request, &array);
1966
1967 /* If there is a thread which would otherwise be resumed, which
1968 has a pending status, then don't resume any threads - we can just
1969 report the pending status. Make sure to queue any signals
1970 that would otherwise be sent. In non-stop mode, we'll apply this
1971 logic to each thread individually. */
1972 pending_flag = 0;
1973 if (!non_stop)
1974 find_inferior (&all_lwps, resume_status_pending_p, &pending_flag);
1975
1976 if (debug_threads)
1977 {
1978 if (pending_flag)
1979 fprintf (stderr, "Not resuming, pending status\n");
1980 else
1981 fprintf (stderr, "Resuming, no pending status\n");
1982 }
1983
1984 find_inferior (&all_threads, linux_resume_one_thread, &pending_flag);
1985 }
1986
1987 #ifdef HAVE_LINUX_USRREGS
1988
1989 int
1990 register_addr (int regnum)
1991 {
1992 int addr;
1993
1994 if (regnum < 0 || regnum >= the_low_target.num_regs)
1995 error ("Invalid register number %d.", regnum);
1996
1997 addr = the_low_target.regmap[regnum];
1998
1999 return addr;
2000 }
2001
2002 /* Fetch one register. */
2003 static void
2004 fetch_register (int regno)
2005 {
2006 CORE_ADDR regaddr;
2007 int i, size;
2008 char *buf;
2009 int pid;
2010
2011 if (regno >= the_low_target.num_regs)
2012 return;
2013 if ((*the_low_target.cannot_fetch_register) (regno))
2014 return;
2015
2016 regaddr = register_addr (regno);
2017 if (regaddr == -1)
2018 return;
2019
2020 pid = lwpid_of (get_thread_lwp (current_inferior));
2021 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2022 & - sizeof (PTRACE_XFER_TYPE));
2023 buf = alloca (size);
2024 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2025 {
2026 errno = 0;
2027 *(PTRACE_XFER_TYPE *) (buf + i) =
2028 ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) regaddr, 0);
2029 regaddr += sizeof (PTRACE_XFER_TYPE);
2030 if (errno != 0)
2031 {
2032 /* Warning, not error, in case we are attached; sometimes the
2033 kernel doesn't let us at the registers. */
2034 char *err = strerror (errno);
2035 char *msg = alloca (strlen (err) + 128);
2036 sprintf (msg, "reading register %d: %s", regno, err);
2037 error (msg);
2038 goto error_exit;
2039 }
2040 }
2041
2042 if (the_low_target.supply_ptrace_register)
2043 the_low_target.supply_ptrace_register (regno, buf);
2044 else
2045 supply_register (regno, buf);
2046
2047 error_exit:;
2048 }
2049
/* Fetch all registers, or just one, from the child process.  */
static void
usr_fetch_inferior_registers (int regno)
{
  /* NOTE(review): a request for register 0 fetches *all* registers,
     just like -1 does — confirm this special case is intentional.  */
  if (regno == -1 || regno == 0)
    for (regno = 0; regno < the_low_target.num_regs; regno++)
      fetch_register (regno);
  else
    fetch_register (regno);
}
2060
2061 /* Store our register values back into the inferior.
2062 If REGNO is -1, do this for all registers.
2063 Otherwise, REGNO specifies which register (so we can save time). */
2064 static void
2065 usr_store_inferior_registers (int regno)
2066 {
2067 CORE_ADDR regaddr;
2068 int i, size;
2069 char *buf;
2070 int pid;
2071
2072 if (regno >= 0)
2073 {
2074 if (regno >= the_low_target.num_regs)
2075 return;
2076
2077 if ((*the_low_target.cannot_store_register) (regno) == 1)
2078 return;
2079
2080 regaddr = register_addr (regno);
2081 if (regaddr == -1)
2082 return;
2083 errno = 0;
2084 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2085 & - sizeof (PTRACE_XFER_TYPE);
2086 buf = alloca (size);
2087 memset (buf, 0, size);
2088
2089 if (the_low_target.collect_ptrace_register)
2090 the_low_target.collect_ptrace_register (regno, buf);
2091 else
2092 collect_register (regno, buf);
2093
2094 pid = lwpid_of (get_thread_lwp (current_inferior));
2095 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2096 {
2097 errno = 0;
2098 ptrace (PTRACE_POKEUSER, pid, (PTRACE_ARG3_TYPE) regaddr,
2099 *(PTRACE_XFER_TYPE *) (buf + i));
2100 if (errno != 0)
2101 {
2102 /* At this point, ESRCH should mean the process is
2103 already gone, in which case we simply ignore attempts
2104 to change its registers. See also the related
2105 comment in linux_resume_one_lwp. */
2106 if (errno == ESRCH)
2107 return;
2108
2109 if ((*the_low_target.cannot_store_register) (regno) == 0)
2110 {
2111 char *err = strerror (errno);
2112 char *msg = alloca (strlen (err) + 128);
2113 sprintf (msg, "writing register %d: %s",
2114 regno, err);
2115 error (msg);
2116 return;
2117 }
2118 }
2119 regaddr += sizeof (PTRACE_XFER_TYPE);
2120 }
2121 }
2122 else
2123 for (regno = 0; regno < the_low_target.num_regs; regno++)
2124 usr_store_inferior_registers (regno);
2125 }
2126 #endif /* HAVE_LINUX_USRREGS */
2127
2128
2129
2130 #ifdef HAVE_LINUX_REGSETS
2131
2132 static int
2133 regsets_fetch_inferior_registers ()
2134 {
2135 struct regset_info *regset;
2136 int saw_general_regs = 0;
2137 int pid;
2138
2139 regset = target_regsets;
2140
2141 pid = lwpid_of (get_thread_lwp (current_inferior));
2142 while (regset->size >= 0)
2143 {
2144 void *buf;
2145 int res;
2146
2147 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2148 {
2149 regset ++;
2150 continue;
2151 }
2152
2153 buf = xmalloc (regset->size);
2154 #ifndef __sparc__
2155 res = ptrace (regset->get_request, pid, 0, buf);
2156 #else
2157 res = ptrace (regset->get_request, pid, buf, 0);
2158 #endif
2159 if (res < 0)
2160 {
2161 if (errno == EIO)
2162 {
2163 /* If we get EIO on a regset, do not try it again for
2164 this process. */
2165 disabled_regsets[regset - target_regsets] = 1;
2166 free (buf);
2167 continue;
2168 }
2169 else
2170 {
2171 char s[256];
2172 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
2173 pid);
2174 perror (s);
2175 }
2176 }
2177 else if (regset->type == GENERAL_REGS)
2178 saw_general_regs = 1;
2179 regset->store_function (buf);
2180 regset ++;
2181 free (buf);
2182 }
2183 if (saw_general_regs)
2184 return 0;
2185 else
2186 return 1;
2187 }
2188
2189 static int
2190 regsets_store_inferior_registers ()
2191 {
2192 struct regset_info *regset;
2193 int saw_general_regs = 0;
2194 int pid;
2195
2196 regset = target_regsets;
2197
2198 pid = lwpid_of (get_thread_lwp (current_inferior));
2199 while (regset->size >= 0)
2200 {
2201 void *buf;
2202 int res;
2203
2204 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2205 {
2206 regset ++;
2207 continue;
2208 }
2209
2210 buf = xmalloc (regset->size);
2211
2212 /* First fill the buffer with the current register set contents,
2213 in case there are any items in the kernel's regset that are
2214 not in gdbserver's regcache. */
2215 #ifndef __sparc__
2216 res = ptrace (regset->get_request, pid, 0, buf);
2217 #else
2218 res = ptrace (regset->get_request, pid, buf, 0);
2219 #endif
2220
2221 if (res == 0)
2222 {
2223 /* Then overlay our cached registers on that. */
2224 regset->fill_function (buf);
2225
2226 /* Only now do we write the register set. */
2227 #ifndef __sparc__
2228 res = ptrace (regset->set_request, pid, 0, buf);
2229 #else
2230 res = ptrace (regset->set_request, pid, buf, 0);
2231 #endif
2232 }
2233
2234 if (res < 0)
2235 {
2236 if (errno == EIO)
2237 {
2238 /* If we get EIO on a regset, do not try it again for
2239 this process. */
2240 disabled_regsets[regset - target_regsets] = 1;
2241 free (buf);
2242 continue;
2243 }
2244 else if (errno == ESRCH)
2245 {
2246 /* At this point, ESRCH should mean the process is
2247 already gone, in which case we simply ignore attempts
2248 to change its registers. See also the related
2249 comment in linux_resume_one_lwp. */
2250 free (buf);
2251 return 0;
2252 }
2253 else
2254 {
2255 perror ("Warning: ptrace(regsets_store_inferior_registers)");
2256 }
2257 }
2258 else if (regset->type == GENERAL_REGS)
2259 saw_general_regs = 1;
2260 regset ++;
2261 free (buf);
2262 }
2263 if (saw_general_regs)
2264 return 0;
2265 else
2266 return 1;
2267 return 0;
2268 }
2269
2270 #endif /* HAVE_LINUX_REGSETS */
2271
2272
/* Fetch register REGNO (or all registers if REGNO is -1) from the
   inferior.  Prefer the regset interface when it is compiled in;
   fall back to per-register PTRACE_PEEKUSER otherwise.  */
void
linux_fetch_registers (int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* A zero return means the general registers were fetched via a
     regset, so the slower USRREGS path is unnecessary.  */
  if (regsets_fetch_inferior_registers () == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regno);
#endif
}
2284
/* Store register REGNO (or all registers if REGNO is -1) back into
   the inferior.  Prefer the regset interface when it is compiled in;
   fall back to per-register PTRACE_POKEUSER otherwise.  */
void
linux_store_registers (int regno)
{
#ifdef HAVE_LINUX_REGSETS
  /* A zero return means a general-register regset was written, so
     the slower USRREGS path is unnecessary.  */
  if (regsets_store_inferior_registers () == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regno);
#endif
}
2296
2297
2298 /* Copy LEN bytes from inferior's memory starting at MEMADDR
2299 to debugger memory starting at MYADDR. */
2300
2301 static int
2302 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
2303 {
2304 register int i;
2305 /* Round starting address down to longword boundary. */
2306 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2307 /* Round ending address up; get number of longwords that makes. */
2308 register int count
2309 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
2310 / sizeof (PTRACE_XFER_TYPE);
2311 /* Allocate buffer of that many longwords. */
2312 register PTRACE_XFER_TYPE *buffer
2313 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2314 int fd;
2315 char filename[64];
2316 int pid = lwpid_of (get_thread_lwp (current_inferior));
2317
2318 /* Try using /proc. Don't bother for one word. */
2319 if (len >= 3 * sizeof (long))
2320 {
2321 /* We could keep this file open and cache it - possibly one per
2322 thread. That requires some juggling, but is even faster. */
2323 sprintf (filename, "/proc/%d/mem", pid);
2324 fd = open (filename, O_RDONLY | O_LARGEFILE);
2325 if (fd == -1)
2326 goto no_proc;
2327
2328 /* If pread64 is available, use it. It's faster if the kernel
2329 supports it (only one syscall), and it's 64-bit safe even on
2330 32-bit platforms (for instance, SPARC debugging a SPARC64
2331 application). */
2332 #ifdef HAVE_PREAD64
2333 if (pread64 (fd, myaddr, len, memaddr) != len)
2334 #else
2335 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, memaddr, len) != len)
2336 #endif
2337 {
2338 close (fd);
2339 goto no_proc;
2340 }
2341
2342 close (fd);
2343 return 0;
2344 }
2345
2346 no_proc:
2347 /* Read all the longwords */
2348 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2349 {
2350 errno = 0;
2351 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
2352 if (errno)
2353 return errno;
2354 }
2355
2356 /* Copy appropriate bytes out of the buffer. */
2357 memcpy (myaddr,
2358 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
2359 len);
2360
2361 return 0;
2362 }
2363
2364 /* Copy LEN bytes of data from debugger memory at MYADDR
2365 to inferior's memory at MEMADDR.
2366 On failure (cannot write the inferior)
2367 returns the value of errno. */
2368
2369 static int
2370 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
2371 {
2372 register int i;
2373 /* Round starting address down to longword boundary. */
2374 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2375 /* Round ending address up; get number of longwords that makes. */
2376 register int count
2377 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
2378 /* Allocate buffer of that many longwords. */
2379 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2380 int pid = lwpid_of (get_thread_lwp (current_inferior));
2381
2382 if (debug_threads)
2383 {
2384 fprintf (stderr, "Writing %02x to %08lx\n", (unsigned)myaddr[0], (long)memaddr);
2385 }
2386
2387 /* Fill start and end extra bytes of buffer with existing memory data. */
2388
2389 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
2390
2391 if (count > 1)
2392 {
2393 buffer[count - 1]
2394 = ptrace (PTRACE_PEEKTEXT, pid,
2395 (PTRACE_ARG3_TYPE) (addr + (count - 1)
2396 * sizeof (PTRACE_XFER_TYPE)),
2397 0);
2398 }
2399
2400 /* Copy data to be written over corresponding part of buffer */
2401
2402 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
2403
2404 /* Write the entire buffer. */
2405
2406 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2407 {
2408 errno = 0;
2409 ptrace (PTRACE_POKETEXT, pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
2410 if (errno)
2411 return errno;
2412 }
2413
2414 return 0;
2415 }
2416
2417 static int linux_supports_tracefork_flag;
2418
2419 /* Helper functions for linux_test_for_tracefork, called via clone (). */
2420
static int
linux_tracefork_grandchild (void *arg)
{
  /* The grandchild exists only so the parent can observe the child's
     clone; it has nothing to do but exit.  */
  _exit (0);
}
2426
2427 #define STACK_SIZE 4096
2428
static int
linux_tracefork_child (void *arg)
{
  /* Become traced, then stop, so the parent can set ptrace options
     on us before we clone below.  */
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
#ifdef __ia64__
  /* ia64's __clone2 takes the stack base and size as separate
     arguments.  */
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  /* ARG is the base of a stack block; plain clone wants an address
     at the other end of it (stack presumably grows down here).  */
  clone (linux_tracefork_grandchild, arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif
  _exit (0);
}
2443
/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  Returns what
   waitpid returns, with the relevant error left in errno.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      /* Emulate __WALL by alternating between plain children and
	 __WCLONE children, polling each with WNOHANG.  Remember
	 whether the caller itself wanted a non-blocking wait.  */
      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  /* A real error (other than "no such children of this
	     flavor") or an actual wait result ends the loop.  */
	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  /* Alternate between the two child flavors.  */
	  flags ^= __WCLONE;
	}

      /* Restore the caller's signal mask.  */
      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      /* No __WALL emulation needed; just retry on EINTR.  */
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  /* Hand the saved errno back to the caller; the debug fprintf above
     may have clobbered the live one.  */
  errno = out_errno;
  return ret;
}
2518
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.  Make
   sure that we can enable the option, and that it had the desired
   effect.  Sets linux_supports_tracefork_flag accordingly.  */

static void
linux_test_for_tracefork (void)
{
  int child_pid, ret, status;
  long second_pid;
  char *stack = xmalloc (STACK_SIZE * 4);

  linux_supports_tracefork_flag = 0;

  /* Use CLONE_VM instead of fork, to support uClinux (no MMU).  */
#ifdef __ia64__
  child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
			CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#else
  child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
		     CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#endif
  if (child_pid == -1)
    perror_with_name ("clone");

  /* The child raises SIGSTOP after PTRACE_TRACEME; wait for it.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name ("waitpid");
  else if (ret != child_pid)
    error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
  if (! WIFSTOPPED (status))
    error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);

  /* Try to turn on fork tracing.  If this fails, the kernel does not
     support the option; kill the child and bail out.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning ("linux_test_for_tracefork: failed to kill child");
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning ("linux_test_for_tracefork: failed to wait for killed child");
      else if (!WIFSIGNALED (status))
	warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
		 "killed child", status);

      return;
    }

  /* Let the child clone its grandchild, and see whether we get the
     PTRACE_EVENT_FORK stop we asked for.  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning ("linux_test_for_tracefork: failed to resume child");

  ret = my_waitpid (child_pid, &status, 0);

  /* The event, if reported, arrives in the high bits of the wait
     status.  */
  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  /* The option both stuck and delivered the new child's pid:
	     fork tracing works.  Clean up the grandchild.  */
	  linux_supports_tracefork_flag = 1;
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning ("linux_test_for_tracefork: failed to kill second child");
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning ("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)", ret, status);

  /* Kill the child and reap every remaining stop until it is really
     gone.  */
  do
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	warning ("linux_test_for_tracefork: failed to kill child");
      my_waitpid (child_pid, &status, 0);
    }
  while (WIFSTOPPED (status));

  free (stack);
}
2609
2610
/* Called when symbols become available: give libthread_db a chance
   to hook up to the inferior's thread library.  */
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Nothing to do if thread_db is already active for this process.  */
  if (proc->private->thread_db_active)
    return;

  /* The argument tells thread_db_init whether to rely on its own
     thread-event mechanism; it is needed only when the kernel cannot
     report new threads via ptrace fork tracing (NOTE(review):
     inferred from the flag's name -- confirm in thread-db.c).  */
  proc->private->thread_db_active
    = thread_db_init (!linux_supports_tracefork_flag);
#endif
}
2624
/* Interrupt the inferior: deliver SIGINT to the thread being
   continued, if one is selected, otherwise to the main process.  */
static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  if (!ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      /* A specific thread was being continued; signal that LWP.  */
      struct lwp_info *lwp;
      int lwpid;

      lwp = get_thread_lwp (current_inferior);
      lwpid = lwpid_of (lwp);
      kill_lwp (lwpid, SIGINT);
    }
  else
    kill_lwp (signal_pid, SIGINT);
}
2643
2644 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
2645 to debugger memory starting at MYADDR. */
2646
2647 static int
2648 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
2649 {
2650 char filename[PATH_MAX];
2651 int fd, n;
2652 int pid = lwpid_of (get_thread_lwp (current_inferior));
2653
2654 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
2655
2656 fd = open (filename, O_RDONLY);
2657 if (fd < 0)
2658 return -1;
2659
2660 if (offset != (CORE_ADDR) 0
2661 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
2662 n = -1;
2663 else
2664 n = read (fd, myaddr, len);
2665
2666 close (fd);
2667
2668 return n;
2669 }
2670
2671 /* These watchpoint related wrapper functions simply pass on the function call
2672 if the target has registered a corresponding function. */
2673
2674 static int
2675 linux_insert_watchpoint (char type, CORE_ADDR addr, int len)
2676 {
2677 if (the_low_target.insert_watchpoint != NULL)
2678 return the_low_target.insert_watchpoint (type, addr, len);
2679 else
2680 /* Unsupported (see target.h). */
2681 return 1;
2682 }
2683
2684 static int
2685 linux_remove_watchpoint (char type, CORE_ADDR addr, int len)
2686 {
2687 if (the_low_target.remove_watchpoint != NULL)
2688 return the_low_target.remove_watchpoint (type, addr, len);
2689 else
2690 /* Unsupported (see target.h). */
2691 return 1;
2692 }
2693
2694 static int
2695 linux_stopped_by_watchpoint (void)
2696 {
2697 if (the_low_target.stopped_by_watchpoint != NULL)
2698 return the_low_target.stopped_by_watchpoint ();
2699 else
2700 return 0;
2701 }
2702
2703 static CORE_ADDR
2704 linux_stopped_data_address (void)
2705 {
2706 if (the_low_target.stopped_data_address != NULL)
2707 return the_low_target.stopped_data_address ();
2708 else
2709 return 0;
2710 }
2711
2712 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2713 #if defined(__mcoldfire__)
2714 /* These should really be defined in the kernel's ptrace.h header. */
2715 #define PT_TEXT_ADDR 49*4
2716 #define PT_DATA_ADDR 50*4
2717 #define PT_TEXT_END_ADDR 51*4
2718 #endif
2719
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Fills in *TEXT_P and *DATA_P and returns 1 on
   success, 0 when the addresses cannot be read.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* Clear errno once up front; any of the three PEEKUSER calls below
     may set it on failure.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
 return 0;
}
2755 #endif
2756
2757 static int
2758 linux_qxfer_osdata (const char *annex,
2759 unsigned char *readbuf, unsigned const char *writebuf,
2760 CORE_ADDR offset, int len)
2761 {
2762 /* We make the process list snapshot when the object starts to be
2763 read. */
2764 static const char *buf;
2765 static long len_avail = -1;
2766 static struct buffer buffer;
2767
2768 DIR *dirp;
2769
2770 if (strcmp (annex, "processes") != 0)
2771 return 0;
2772
2773 if (!readbuf || writebuf)
2774 return 0;
2775
2776 if (offset == 0)
2777 {
2778 if (len_avail != -1 && len_avail != 0)
2779 buffer_free (&buffer);
2780 len_avail = 0;
2781 buf = NULL;
2782 buffer_init (&buffer);
2783 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
2784
2785 dirp = opendir ("/proc");
2786 if (dirp)
2787 {
2788 struct dirent *dp;
2789 while ((dp = readdir (dirp)) != NULL)
2790 {
2791 struct stat statbuf;
2792 char procentry[sizeof ("/proc/4294967295")];
2793
2794 if (!isdigit (dp->d_name[0])
2795 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
2796 continue;
2797
2798 sprintf (procentry, "/proc/%s", dp->d_name);
2799 if (stat (procentry, &statbuf) == 0
2800 && S_ISDIR (statbuf.st_mode))
2801 {
2802 char pathname[128];
2803 FILE *f;
2804 char cmd[MAXPATHLEN + 1];
2805 struct passwd *entry;
2806
2807 sprintf (pathname, "/proc/%s/cmdline", dp->d_name);
2808 entry = getpwuid (statbuf.st_uid);
2809
2810 if ((f = fopen (pathname, "r")) != NULL)
2811 {
2812 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
2813 if (len > 0)
2814 {
2815 int i;
2816 for (i = 0; i < len; i++)
2817 if (cmd[i] == '\0')
2818 cmd[i] = ' ';
2819 cmd[len] = '\0';
2820
2821 buffer_xml_printf (
2822 &buffer,
2823 "<item>"
2824 "<column name=\"pid\">%s</column>"
2825 "<column name=\"user\">%s</column>"
2826 "<column name=\"command\">%s</column>"
2827 "</item>",
2828 dp->d_name,
2829 entry ? entry->pw_name : "?",
2830 cmd);
2831 }
2832 fclose (f);
2833 }
2834 }
2835 }
2836
2837 closedir (dirp);
2838 }
2839 buffer_grow_str0 (&buffer, "</osdata>\n");
2840 buf = buffer_finish (&buffer);
2841 len_avail = strlen (buf);
2842 }
2843
2844 if (offset >= len_avail)
2845 {
2846 /* Done. Get rid of the data. */
2847 buffer_free (&buffer);
2848 buf = NULL;
2849 len_avail = 0;
2850 return 0;
2851 }
2852
2853 if (len > len_avail - offset)
2854 len = len_avail - offset;
2855 memcpy (readbuf, buf + offset, len);
2856
2857 return len;
2858 }
2859
2860 /* Convert a native/host siginfo object, into/from the siginfo in the
2861 layout of the inferiors' architecture. */
2862
2863 static void
2864 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
2865 {
2866 int done = 0;
2867
2868 if (the_low_target.siginfo_fixup != NULL)
2869 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
2870
2871 /* If there was no callback, or the callback didn't do anything,
2872 then just do a straight memcpy. */
2873 if (!done)
2874 {
2875 if (direction == 1)
2876 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
2877 else
2878 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
2879 }
2880 }
2881
/* Handle qXfer:siginfo reads and writes: transfer up to LEN bytes of
   the current LWP's siginfo, starting at OFFSET.  Returns the number
   of bytes transferred, or -1 on error.  */
static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  if (offset > sizeof (siginfo))
    return -1;

  /* Always fetch the current siginfo first, even for a write, so a
     partial write only modifies the requested byte range.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the siginfo object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
2930
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  /* Preserve errno across the handler; write and async_file_mark may
     clobber it.  */
  int old_errno = errno;

  if (debug_threads)
    /* fprintf is not async-signal-safe, so call write directly.  */
    write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
2949
/* The Linux target always supports non-stop mode.  */
static int
linux_supports_non_stop (void)
{
  static const int non_stop_supported = 1;
  return non_stop_supported;
}
2955
/* Enable or disable async mode: create or tear down the SIGCHLD
   event pipe and (un)register it with the event loop.  Returns the
   previous setting.  */
static int
linux_async (int enable)
{
  /* The pipe being open is what marks async mode as active.  */
  int previous = (linux_event_pipe[0] != -1);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while switching modes so the handler never
	 sees the pipe in a half-set-up state.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
2999
/* Switch non-stop mode on or off.  Returns 0 (success).  */
static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}
3007
/* The Linux implementation of gdbserver's target operations vector.
   Entries are positional; their meanings follow struct target_ops in
   target.h (NOTE(review): slot names below inferred from the
   function names -- confirm against target.h).  */
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_watchpoint,
  linux_remove_watchpoint,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,         /* read_offsets: only meaningful on no-MMU uClinux.  */
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,         /* No TLS address lookup without thread_db.  */
#endif
  NULL,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
};
3046
/* Ignore the signal that LinuxThreads presumably uses for thread
   cancellation, so it is not fatal to gdbserver itself.  Declared
   with a proper (void) prototype instead of a K&R-style empty
   parameter list.  */
static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
  signal (__SIGRTMIN+1, SIG_IGN);
}
3054
/* One-time initialization of the Linux low target: install the
   target vector, probe kernel features, and set up SIGCHLD
   handling.  */
void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  /* Advertise the architecture's breakpoint instruction so the core
     can plant memory breakpoints.  */
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  /* Probe whether the kernel supports PTRACE_O_TRACEFORK.  */
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  /* Count the regsets and allocate one "disabled" flag per regset;
     a flag is set when the kernel rejects that regset with EIO.  */
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  /* Install the SIGCHLD handler.  SA_RESTART keeps interrupted
     syscalls restarting automatically.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}
This page took 0.0997 seconds and 5 git commands to generate.