1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22 #include "ansidecl.h" /* For ATTRIBUTE_PACKED, must be bug in external.h. */
23 #include "elf/common.h"
24 #include "elf/external.h"
25
26 #include <sys/wait.h>
27 #include <stdio.h>
28 #include <sys/param.h>
29 #include <sys/ptrace.h>
30 #include <signal.h>
31 #include <sys/ioctl.h>
32 #include <fcntl.h>
33 #include <string.h>
34 #include <stdlib.h>
35 #include <unistd.h>
36 #include <errno.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43
44 #ifndef PTRACE_GETSIGINFO
45 # define PTRACE_GETSIGINFO 0x4202
46 # define PTRACE_SETSIGINFO 0x4203
47 #endif
48
49 #ifndef O_LARGEFILE
50 #define O_LARGEFILE 0
51 #endif
52
53 /* If the system headers did not provide the constants, hard-code the normal
54 values. */
55 #ifndef PTRACE_EVENT_FORK
56
57 #define PTRACE_SETOPTIONS 0x4200
58 #define PTRACE_GETEVENTMSG 0x4201
59
60 /* options set using PTRACE_SETOPTIONS */
61 #define PTRACE_O_TRACESYSGOOD 0x00000001
62 #define PTRACE_O_TRACEFORK 0x00000002
63 #define PTRACE_O_TRACEVFORK 0x00000004
64 #define PTRACE_O_TRACECLONE 0x00000008
65 #define PTRACE_O_TRACEEXEC 0x00000010
66 #define PTRACE_O_TRACEVFORKDONE 0x00000020
67 #define PTRACE_O_TRACEEXIT 0x00000040
68
69 /* Wait extended result codes for the above trace options. */
70 #define PTRACE_EVENT_FORK 1
71 #define PTRACE_EVENT_VFORK 2
72 #define PTRACE_EVENT_CLONE 3
73 #define PTRACE_EVENT_EXEC 4
74 #define PTRACE_EVENT_VFORK_DONE 5
75 #define PTRACE_EVENT_EXIT 6
76
77 #endif /* PTRACE_EVENT_FORK */
78
79 /* We can't always assume that this flag is available, but all systems
80 with the ptrace event handlers also have __WALL, so it's safe to use
81 in some contexts. */
82 #ifndef __WALL
83 #define __WALL 0x40000000 /* Wait for any child. */
84 #endif
85
86 #ifdef __UCLIBC__
87 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
88 #define HAS_NOMMU
89 #endif
90 #endif
91
92 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
93 representation of the thread ID.
94
95 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
96 the same as the LWP ID.
97
98 ``all_processes'' is keyed by the "overall process ID", which
99 GNU/Linux calls tgid, "thread group ID". */
100
101 struct inferior_list all_lwps;
102
103 /* A list of all unknown processes which receive stop signals. Some other
104 process will presumably claim each of these as forked children
105 momentarily. */
106
107 struct inferior_list stopped_pids;
108
109 /* FIXME this is a bit of a hack, and could be removed. */
110 int stopping_threads;
111
112 /* FIXME make into a target method? */
113 int using_threads = 1;
114
115 /* This flag is true iff we've just created or attached to our first
116 inferior but it has not stopped yet. As soon as it does, we need
117 to call the low target's arch_setup callback. Doing this only on
118 the first inferior avoids reinitializing the architecture on every
119 inferior, and avoids messing with the register caches of the
120 already running inferiors. NOTE: this assumes all inferiors under
121 control of gdbserver have the same architecture. */
122 static int new_inferior;
123
124 static void linux_resume_one_lwp (struct lwp_info *lwp,
125 int step, int signal, siginfo_t *info);
126 static void linux_resume (struct thread_resume *resume_info, size_t n);
127 static void stop_all_lwps (void);
128 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
129 static int check_removed_breakpoint (struct lwp_info *event_child);
130 static void *add_lwp (ptid_t ptid);
131 static int my_waitpid (int pid, int *status, int flags);
132 static int linux_stopped_by_watchpoint (void);
133 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
134
135 struct pending_signals
136 {
137 int signal;
138 siginfo_t info;
139 struct pending_signals *prev;
140 };
141
142 #define PTRACE_ARG3_TYPE long
143 #define PTRACE_XFER_TYPE long
144
145 #ifdef HAVE_LINUX_REGSETS
146 static char *disabled_regsets;
147 static int num_regsets;
148 #endif
149
150 /* The read/write ends of the pipe registered as waitable file in the
151 event loop. */
152 static int linux_event_pipe[2] = { -1, -1 };
153
154 /* True if we're currently in async mode. */
155 #define target_is_async_p() (linux_event_pipe[0] != -1)
156
157 static void send_sigstop (struct inferior_list_entry *entry);
158 static void wait_for_sigstop (struct inferior_list_entry *entry);
159
160 /* Accepts an integer PID; returns a string representing a file that
161 can be opened to get info for the child process.
162 Space for the result is malloc'd; the caller must free it. */
163
164 char *
165 linux_child_pid_to_exec_file (int pid)
166 {
167 char *name1, *name2;
168
169 name1 = xmalloc (MAXPATHLEN);
170 name2 = xmalloc (MAXPATHLEN);
171 memset (name2, 0, MAXPATHLEN);
172
173 sprintf (name1, "/proc/%d/exe", pid);
174 if (readlink (name1, name2, MAXPATHLEN) > 0)
175 {
176 free (name1);
177 return name2;
178 }
179 else
180 {
181 free (name2);
182 return name1;
183 }
184 }
185
186 /* Return non-zero if HEADER is a 64-bit ELF file. */
187
188 static int
189 elf_64_header_p (const Elf64_External_Ehdr *header)
190 {
191 return (header->e_ident[EI_MAG0] == ELFMAG0
192 && header->e_ident[EI_MAG1] == ELFMAG1
193 && header->e_ident[EI_MAG2] == ELFMAG2
194 && header->e_ident[EI_MAG3] == ELFMAG3
195 && header->e_ident[EI_CLASS] == ELFCLASS64);
196 }
197
198 /* Return non-zero if FILE is a 64-bit ELF file,
199 zero if the file is not a 64-bit ELF file,
200 and -1 if the file is not accessible or doesn't exist. */
201
202 int
203 elf_64_file_p (const char *file)
204 {
205 Elf64_External_Ehdr header;
206 int fd;
207
208 fd = open (file, O_RDONLY);
209 if (fd < 0)
210 return -1;
211
212 if (read (fd, &header, sizeof (header)) != sizeof (header))
213 {
214 close (fd);
215 return 0;
216 }
217 close (fd);
218
219 return elf_64_header_p (&header);
220 }
221
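/* Remove LWP and its thread from our lists, and free its storage.  */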
222 static void
223 delete_lwp (struct lwp_info *lwp)
224 {
225 remove_thread (get_lwp_thread (lwp));
226 remove_inferior (&all_lwps, &lwp->head);
227 free (lwp);
228 }
229
230 /* Add a process to the common process list, and set its private
231 data. */
232
233 static struct process_info *
234 linux_add_process (int pid, int attached)
235 {
236 struct process_info *proc;
237
238 /* Is this the first process? If so, then set the arch. */
239 if (all_processes.head == NULL)
240 new_inferior = 1;
241
242 proc = add_process (pid, attached);
243 proc->private = xcalloc (1, sizeof (*proc->private));
244
245 return proc;
246 }
247
248 /* Remove a process from the common process list,
249 also freeing all private data. */
250
251 static void
252 linux_remove_process (struct process_info *process)
253 {
254 free (process->private);
255 remove_process (process);
256 }
257
258 /* Handle a GNU/Linux extended wait response. If we see a clone
259 event, we need to add the new LWP to our list (and not report the
260 trap to higher layers). */
261
262 static void
263 handle_extended_wait (struct lwp_info *event_child, int wstat)
264 {
265 int event = wstat >> 16;
266 struct lwp_info *new_lwp;
267
268 if (event == PTRACE_EVENT_CLONE)
269 {
270 ptid_t ptid;
271 unsigned long new_pid;
272 int ret, status = W_STOPCODE (SIGSTOP);
273
274 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
275
276 /* If we haven't already seen the new PID stop, wait for it now. */
277 if (! pull_pid_from_list (&stopped_pids, new_pid))
278 {
279 /* The new child has a pending SIGSTOP. We can't affect it until it
280 hits the SIGSTOP, but we're already attached. */
281
282 ret = my_waitpid (new_pid, &status, __WALL);
283
284 if (ret == -1)
285 perror_with_name ("waiting for new child");
286 else if (ret != new_pid)
287 warning ("wait returned unexpected PID %d", ret);
288 else if (!WIFSTOPPED (status))
289 warning ("wait returned unexpected status 0x%x", status);
290 }
291
292 ptrace (PTRACE_SETOPTIONS, new_pid, 0, PTRACE_O_TRACECLONE);
293
294 ptid = ptid_build (pid_of (event_child), new_pid, 0);
295 new_lwp = (struct lwp_info *) add_lwp (ptid);
296 add_thread (ptid, new_lwp);
297
298 /* Normally we will get the pending SIGSTOP. But in some cases
299 we might get another signal delivered to the group first.
300 If we do get another signal, be sure not to lose it. */
301 if (WSTOPSIG (status) == SIGSTOP)
302 {
303 if (stopping_threads)
304 new_lwp->stopped = 1;
305 else
306 ptrace (PTRACE_CONT, new_pid, 0, 0);
307 }
308 else
309 {
310 new_lwp->stop_expected = 1;
311 if (stopping_threads)
312 {
313 new_lwp->stopped = 1;
314 new_lwp->status_pending_p = 1;
315 new_lwp->status_pending = status;
316 }
317 else
318 /* Pass the signal on. This is what GDB does - except
319 shouldn't we really report it instead? */
320 ptrace (PTRACE_CONT, new_pid, 0, WSTOPSIG (status));
321 }
322
323 /* Always resume the current thread. If we are stopping
324 threads, it will have a pending SIGSTOP; we may as well
325 collect it now. */
326 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
327 }
328 }
329
330 /* This function should only be called if the process got a SIGTRAP.
331 The SIGTRAP could mean several things.
332
333 On i386, where decr_pc_after_break is non-zero:
334 If we were single-stepping this process using PTRACE_SINGLESTEP,
335 we will get only the one SIGTRAP (even if the instruction we
336 stepped over was a breakpoint). The value of $eip will be the
337 next instruction.
338 If we continue the process using PTRACE_CONT, we will get a
339 SIGTRAP when we hit a breakpoint. The value of $eip will be
340 the instruction after the breakpoint (i.e. needs to be
341 decremented). If we report the SIGTRAP to GDB, we must also
342 report the undecremented PC. If we cancel the SIGTRAP, we
343 must resume at the decremented PC.
344
345 (Presumably, not yet tested) On a non-decr_pc_after_break machine
346 with hardware or kernel single-step:
347 If we single-step over a breakpoint instruction, our PC will
348 point at the following instruction. If we continue and hit a
349 breakpoint instruction, our PC will point at the breakpoint
350 instruction. */
351
352 static CORE_ADDR
353 get_stop_pc (void)
354 {
355 CORE_ADDR stop_pc = (*the_low_target.get_pc) ();
356
357 if (get_thread_lwp (current_inferior)->stepping)
358 return stop_pc;
359 else
360 return stop_pc - the_low_target.decr_pc_after_break;
361 }
362
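/* Allocate a new lwp_info keyed by PTID, add it to the LWP list, and
return it.  The caller is responsible for adding the corresponding thread.  */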
363 static void *
364 add_lwp (ptid_t ptid)
365 {
366 struct lwp_info *lwp;
367
368 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
369 memset (lwp, 0, sizeof (*lwp));
370
371 lwp->head.id = ptid;
372
373 add_inferior_to_list (&all_lwps, &lwp->head);
374
375 return lwp;
376 }
377
378 /* Start an inferior process and return its pid.
379 ALLARGS is a vector of program-name and args. */
380
381 static int
382 linux_create_inferior (char *program, char **allargs)
383 {
384 struct lwp_info *new_lwp;
385 int pid;
386 ptid_t ptid;
387
388 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
389 pid = vfork ();
390 #else
391 pid = fork ();
392 #endif
393 if (pid < 0)
394 perror_with_name ("fork");
395
396 if (pid == 0)
397 {
398 ptrace (PTRACE_TRACEME, 0, 0, 0);
399
400 signal (__SIGRTMIN + 1, SIG_DFL);
401
402 setpgid (0, 0);
403
404 execv (program, allargs);
405 if (errno == ENOENT)
406 execvp (program, allargs);
407
408 fprintf (stderr, "Cannot exec %s: %s.\n", program,
409 strerror (errno));
410 fflush (stderr);
411 _exit (0177);
412 }
413
414 linux_add_process (pid, 0);
415
416 ptid = ptid_build (pid, pid, 0);
417 new_lwp = add_lwp (ptid);
418 add_thread (ptid, new_lwp);
419 new_lwp->must_set_ptrace_flags = 1;
420
421 return pid;
422 }
423
424 /* Attach to an inferior process. */
425
426 static void
427 linux_attach_lwp_1 (unsigned long lwpid, int initial)
428 {
429 ptid_t ptid;
430 struct lwp_info *new_lwp;
431
432 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
433 {
434 if (!initial)
435 {
436 /* If we fail to attach to an LWP, just warn. */
437 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
438 strerror (errno), errno);
439 fflush (stderr);
440 return;
441 }
442 else
443 /* If we fail to attach to a process, report an error. */
444 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
445 strerror (errno), errno);
446 }
447
448 if (initial)
449 /* NOTE/FIXME: This lwp might not have been the tgid. */
450 ptid = ptid_build (lwpid, lwpid, 0);
451 else
452 {
453 /* Note that extracting the pid from the current inferior is
454 safe, since we're always called in the context of the same
455 process as this new thread. */
456 int pid = pid_of (get_thread_lwp (current_inferior));
457 ptid = ptid_build (pid, lwpid, 0);
458 }
459
460 new_lwp = (struct lwp_info *) add_lwp (ptid);
461 add_thread (ptid, new_lwp);
462
463
464 /* We need to wait for SIGSTOP before being able to make the next
465 ptrace call on this LWP. */
466 new_lwp->must_set_ptrace_flags = 1;
467
468 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
469 brings it to a halt.
470
471 There are several cases to consider here:
472
473 1) gdbserver has already attached to the process and is being notified
474 of a new thread that is being created.
475 In this case we should ignore that SIGSTOP and resume the process.
476 This is handled below by setting stop_expected = 1.
477
478 2) This is the first thread (the process thread), and we're attaching
479 to it via attach_inferior.
480 In this case we want the process thread to stop.
481 This is handled by having linux_attach clear stop_expected after
482 we return.
483 ??? If the process already has several threads we leave the other
484 threads running.
485
486 3) GDB is connecting to gdbserver and is requesting an enumeration of all
487 existing threads.
488 In this case we want the thread to stop.
489 FIXME: This case is currently not properly handled.
490 We should wait for the SIGSTOP but don't. Things work apparently
491 because enough time passes between when we ptrace (ATTACH) and when
492 gdb makes the next ptrace call on the thread.
493
494 On the other hand, if we are currently trying to stop all threads, we
495 should treat the new thread as if we had sent it a SIGSTOP. This works
496 because we are guaranteed that the add_lwp call above added us to the
497 end of the list, and so the new thread has not yet reached
498 wait_for_sigstop (but will). */
499 if (! stopping_threads)
500 new_lwp->stop_expected = 1;
501 }
502
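/* Attach to an additional LWP of a process we are already attached to.  */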
503 void
504 linux_attach_lwp (unsigned long lwpid)
505 {
506 linux_attach_lwp_1 (lwpid, 0);
507 }
508
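/* Attach to process PID, treating it as the initial LWP.  Always returns 0.  */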
509 int
510 linux_attach (unsigned long pid)
511 {
512 struct lwp_info *lwp;
513
514 linux_attach_lwp_1 (pid, 1);
515
516 linux_add_process (pid, 1);
517
518 if (!non_stop)
519 {
520 /* Don't ignore the initial SIGSTOP if we just attached to this
521 process. It will be collected by wait shortly. */
522 lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
523 ptid_build (pid, pid, 0));
524 lwp->stop_expected = 0;
525 }
526
527 return 0;
528 }
529
530 struct counter
531 {
532 int pid;
533 int count;
534 };
535
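/* Callback for find_inferior.  Count the threads belonging to ARGS's pid;
return nonzero once a second one has been seen.  */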
536 static int
537 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
538 {
539 struct counter *counter = args;
540
541 if (ptid_get_pid (entry->id) == counter->pid)
542 {
543 if (++counter->count > 1)
544 return 1;
545 }
546
547 return 0;
548 }
549
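/* Return nonzero if THREAD is the only remaining thread of its process.  */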
550 static int
551 last_thread_of_process_p (struct thread_info *thread)
552 {
553 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
554 int pid = ptid_get_pid (ptid);
555 struct counter counter = { pid , 0 };
556
557 return (find_inferior (&all_threads,
558 second_thread_of_pid_p, &counter) == NULL);
559 }
560
561 /* Kill the inferior lwp. */
562
563 static int
564 linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
565 {
566 struct thread_info *thread = (struct thread_info *) entry;
567 struct lwp_info *lwp = get_thread_lwp (thread);
568 int wstat;
569 int pid = * (int *) args;
570
571 if (ptid_get_pid (entry->id) != pid)
572 return 0;
573
574 /* We avoid killing the first thread here, because of a Linux kernel (at
575 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
576 the children get a chance to be reaped, it will remain a zombie
577 forever. */
578
579 if (last_thread_of_process_p (thread))
580 {
581 if (debug_threads)
582 fprintf (stderr, "lkop: is last of process %s\n",
583 target_pid_to_str (entry->id));
584 return 0;
585 }
586
587 /* If we're killing a running inferior, make sure it is stopped
588 first, as PTRACE_KILL will not work otherwise. */
589 if (!lwp->stopped)
590 send_sigstop (&lwp->head);
591
592 do
593 {
594 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
595
596 /* Make sure it died. The loop is most likely unnecessary. */
597 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
598 } while (pid > 0 && WIFSTOPPED (wstat));
599
600 return 0;
601 }
602
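/* Kill process PID and all of its LWPs.  Return 0 on success, or -1 if
the process is not known.  */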
603 static int
604 linux_kill (int pid)
605 {
606 struct process_info *process;
607 struct lwp_info *lwp;
608 struct thread_info *thread;
609 int wstat;
610 int lwpid;
611
612 process = find_process_pid (pid);
613 if (process == NULL)
614 return -1;
615
616 find_inferior (&all_threads, linux_kill_one_lwp, &pid);
617
618 /* See the comment in linux_kill_one_lwp. We did not kill the first
619 thread in the list, so do so now. */
620 lwp = find_lwp_pid (pid_to_ptid (pid));
621 thread = get_lwp_thread (lwp);
622
623 if (debug_threads)
624 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
625 lwpid_of (lwp), pid);
626
627 /* If we're killing a running inferior, make sure it is stopped
628 first, as PTRACE_KILL will not work otherwise. */
629 if (!lwp->stopped)
630 send_sigstop (&lwp->head);
631
632 do
633 {
634 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
635
636 /* Make sure it died. The loop is most likely unnecessary. */
637 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
638 } while (lwpid > 0 && WIFSTOPPED (wstat));
639
640 delete_lwp (lwp);
641 linux_remove_process (process);
642 return 0;
643 }
644
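/* Callback for find_inferior.  Detach from the LWP specified by ENTRY if it
belongs to the process whose pid is pointed to by ARGS, stopping it first
if necessary.  */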
645 static int
646 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
647 {
648 struct thread_info *thread = (struct thread_info *) entry;
649 struct lwp_info *lwp = get_thread_lwp (thread);
650 int pid = * (int *) args;
651
652 if (ptid_get_pid (entry->id) != pid)
653 return 0;
654
655 /* If we're detaching from a running inferior, make sure it is
656 stopped first, as PTRACE_DETACH will not work otherwise. */
657 if (!lwp->stopped)
658 {
659 int lwpid = lwpid_of (lwp);
660
661 stopping_threads = 1;
662 send_sigstop (&lwp->head);
663
664 /* If this detects a new thread through a clone event, the new
665 thread is appended to the end of the lwp list, so we'll
666 eventually detach from it. */
667 wait_for_sigstop (&lwp->head);
668 stopping_threads = 0;
669
670 /* If LWP exits while we're trying to stop it, there's nothing
671 left to do. */
672 lwp = find_lwp_pid (pid_to_ptid (lwpid));
673 if (lwp == NULL)
674 return 0;
675 }
676
677 /* Make sure the process isn't stopped at a breakpoint that's
678 no longer there. */
679 check_removed_breakpoint (lwp);
680
681 /* If this process is stopped but is expecting a SIGSTOP, then make
682 sure we take care of that now. This isn't absolutely guaranteed
683 to collect the SIGSTOP, but is fairly likely to. */
684 if (lwp->stop_expected)
685 {
686 int wstat;
687 /* Clear stop_expected, so that the SIGSTOP will be reported. */
688 lwp->stop_expected = 0;
689 if (lwp->stopped)
690 linux_resume_one_lwp (lwp, 0, 0, NULL);
691 linux_wait_for_event (lwp->head.id, &wstat, __WALL);
692 }
693
694 /* Flush any pending changes to the process's registers. */
695 regcache_invalidate_one ((struct inferior_list_entry *)
696 get_lwp_thread (lwp));
697
698 /* Finally, let it resume. */
699 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
700
701 delete_lwp (lwp);
702 return 0;
703 }
704
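/* Callback for find_inferior.  Return nonzero if ENTRY belongs to the
process whose pid is pointed to by ARGS.  */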
705 static int
706 any_thread_of (struct inferior_list_entry *entry, void *args)
707 {
708 int *pid_p = args;
709
710 if (ptid_get_pid (entry->id) == *pid_p)
711 return 1;
712
713 return 0;
714 }
715
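/* Detach from process PID, detaching from each of its LWPs in turn.
Return 0 on success, or -1 if the process is not known.  */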
716 static int
717 linux_detach (int pid)
718 {
719 struct process_info *process;
720
721 process = find_process_pid (pid);
722 if (process == NULL)
723 return -1;
724
725 current_inferior =
726 (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);
727
728 delete_all_breakpoints ();
729 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
730 linux_remove_process (process);
731 return 0;
732 }
733
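/* Wait until process PID has exited or been killed, or until it is no
longer our child.  */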
734 static void
735 linux_join (int pid)
736 {
737 int status, ret;
738 struct process_info *process;
739
740 process = find_process_pid (pid);
741 if (process == NULL)
742 return;
743
744 do {
745 ret = my_waitpid (pid, &status, 0);
746 if (WIFEXITED (status) || WIFSIGNALED (status))
747 break;
748 } while (ret != -1 || errno != ECHILD);
749 }
750
751 /* Return nonzero if the given thread is still alive. */
752 static int
753 linux_thread_alive (ptid_t ptid)
754 {
755 struct lwp_info *lwp = find_lwp_pid (ptid);
756
757 /* We assume we always know if a thread exits. If a whole process
758 exited but we still haven't been able to report it to GDB, we'll
759 hold on to the last lwp of the dead process. */
760 if (lwp != NULL)
761 return !lwp->dead;
762 else
763 return 0;
764 }
765
766 /* Return nonzero if this process stopped at a breakpoint which
767 no longer appears to be inserted. Also adjust the PC
768 appropriately to resume where the breakpoint used to be. */
769 static int
770 check_removed_breakpoint (struct lwp_info *event_child)
771 {
772 CORE_ADDR stop_pc;
773 struct thread_info *saved_inferior;
774
775 if (event_child->pending_is_breakpoint == 0)
776 return 0;
777
778 if (debug_threads)
779 fprintf (stderr, "Checking for breakpoint in lwp %ld.\n",
780 lwpid_of (event_child));
781
782 saved_inferior = current_inferior;
783 current_inferior = get_lwp_thread (event_child);
784
785 stop_pc = get_stop_pc ();
786
787 /* If the PC has changed since we stopped, then we shouldn't do
788 anything. This happens if, for instance, GDB handled the
789 decr_pc_after_break subtraction itself. */
790 if (stop_pc != event_child->pending_stop_pc)
791 {
792 if (debug_threads)
793 fprintf (stderr, "Ignoring, PC was changed. Old PC was 0x%08llx\n",
794 event_child->pending_stop_pc);
795
796 event_child->pending_is_breakpoint = 0;
797 current_inferior = saved_inferior;
798 return 0;
799 }
800
801 /* If the breakpoint is still there, we will report hitting it. */
802 if ((*the_low_target.breakpoint_at) (stop_pc))
803 {
804 if (debug_threads)
805 fprintf (stderr, "Ignoring, breakpoint is still present.\n");
806 current_inferior = saved_inferior;
807 return 0;
808 }
809
810 if (debug_threads)
811 fprintf (stderr, "Removed breakpoint.\n");
812
813 /* For decr_pc_after_break targets, here is where we perform the
814 decrement. We go immediately from this function to resuming,
815 and can not safely call get_stop_pc () again. */
816 if (the_low_target.set_pc != NULL)
817 (*the_low_target.set_pc) (stop_pc);
818
819 /* We consumed the pending SIGTRAP. */
820 event_child->pending_is_breakpoint = 0;
821 event_child->status_pending_p = 0;
822 event_child->status_pending = 0;
823
824 current_inferior = saved_inferior;
825 return 1;
826 }
827
828 /* Return 1 if this lwp has an interesting status pending. This
829 function may silently resume an inferior lwp. */
830 static int
831 status_pending_p (struct inferior_list_entry *entry, void *arg)
832 {
833 struct lwp_info *lwp = (struct lwp_info *) entry;
834 ptid_t ptid = * (ptid_t *) arg;
835
836 /* Check if we're only interested in events from a specific process
837 or its lwps. */
838 if (!ptid_equal (minus_one_ptid, ptid)
839 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
840 return 0;
841
842 if (lwp->status_pending_p && !lwp->suspended)
843 if (check_removed_breakpoint (lwp))
844 {
845 /* This thread was stopped at a breakpoint, and the breakpoint
846 is now gone. We were told to continue (or step...) all threads,
847 so GDB isn't trying to single-step past this breakpoint.
848 So instead of reporting the old SIGTRAP, pretend we got to
849 the breakpoint just after it was removed instead of just
850 before; resume the process. */
851 linux_resume_one_lwp (lwp, 0, 0, NULL);
852 return 0;
853 }
854
855 return (lwp->status_pending_p && !lwp->suspended);
856 }
857
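/* Callback for find_inferior.  Return nonzero if ENTRY's LWP id matches the
LWP id (or, if that is zero, the pid) of the ptid pointed to by DATA.  */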
858 static int
859 same_lwp (struct inferior_list_entry *entry, void *data)
860 {
861 ptid_t ptid = *(ptid_t *) data;
862 int lwp;
863
864 if (ptid_get_lwp (ptid) != 0)
865 lwp = ptid_get_lwp (ptid);
866 else
867 lwp = ptid_get_pid (ptid);
868
869 if (ptid_get_lwp (entry->id) == lwp)
870 return 1;
871
872 return 0;
873 }
874
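/* Return the LWP in our list that matches PTID, or NULL if there is none.  */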
875 struct lwp_info *
876 find_lwp_pid (ptid_t ptid)
877 {
878 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
879 }
880
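/* Wait (via waitpid) for an event from the LWP specified by PTID, or from
any LWP if PTID is minus_one_ptid.  Store the raw wait status in *WSTATP
and return the LWP that reported the event, or NULL if OPTIONS contains
WNOHANG and nothing was pending.  */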
881 static struct lwp_info *
882 linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
883 {
884 int ret;
885 int to_wait_for = -1;
886 struct lwp_info *child = NULL;
887
888 if (debug_threads)
889 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
890
891 if (ptid_equal (ptid, minus_one_ptid))
892 to_wait_for = -1; /* any child */
893 else
894 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
895
896 options |= __WALL;
897
898 retry:
899
900 ret = my_waitpid (to_wait_for, wstatp, options);
901 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
902 return NULL;
903 else if (ret == -1)
904 perror_with_name ("waitpid");
905
906 if (debug_threads
907 && (!WIFSTOPPED (*wstatp)
908 || (WSTOPSIG (*wstatp) != 32
909 && WSTOPSIG (*wstatp) != 33)))
910 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
911
912 child = find_lwp_pid (pid_to_ptid (ret));
913
914 /* If we didn't find a process, one of two things presumably happened:
915 - A process we started and then detached from has exited. Ignore it.
916 - A process we are controlling has forked and the new child's stop
917 was reported to us by the kernel. Save its PID. */
918 if (child == NULL && WIFSTOPPED (*wstatp))
919 {
920 add_pid_to_list (&stopped_pids, ret);
921 goto retry;
922 }
923 else if (child == NULL)
924 goto retry;
925
926 child->stopped = 1;
927 child->pending_is_breakpoint = 0;
928
929 child->last_status = *wstatp;
930
931 /* Architecture-specific setup after inferior is running.
932 This needs to happen after we have attached to the inferior
933 and it is stopped for the first time, but before we access
934 any inferior registers. */
935 if (new_inferior)
936 {
937 the_low_target.arch_setup ();
938 #ifdef HAVE_LINUX_REGSETS
939 memset (disabled_regsets, 0, num_regsets);
940 #endif
941 new_inferior = 0;
942 }
943
944 if (debug_threads
945 && WIFSTOPPED (*wstatp))
946 {
947 struct thread_info *saved_inferior = current_inferior;
948 current_inferior = (struct thread_info *)
949 find_inferior_id (&all_threads, child->head.id);
950 /* For testing only; i386_stop_pc prints out a diagnostic. */
951 if (the_low_target.get_pc != NULL)
952 get_stop_pc ();
953 current_inferior = saved_inferior;
954 }
955
956 return child;
957 }
958
959 /* Wait for an event from child PID. If PID is -1, wait for any
960 child. Store the stop status through the status pointer WSTAT.
961 OPTIONS is passed to the waitpid call. Return 0 if no child stop
962 event was found and OPTIONS contains WNOHANG. Return the PID of
963 the stopped child otherwise. */
964
965 static int
966 linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
967 {
968 CORE_ADDR stop_pc;
969 struct lwp_info *event_child = NULL;
970 int bp_status;
971 struct lwp_info *requested_child = NULL;
972
973 /* Check for a lwp with a pending status. */
974 /* It is possible that the user changed the pending task's registers since
975 it stopped. We correctly handle the change of PC if we hit a breakpoint
976 (in check_removed_breakpoint); signals should be reported anyway. */
977
978 if (ptid_equal (ptid, minus_one_ptid)
979 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
980 {
981 event_child = (struct lwp_info *)
982 find_inferior (&all_lwps, status_pending_p, &ptid);
983 if (debug_threads && event_child)
984 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
985 }
986 else
987 {
988 requested_child = find_lwp_pid (ptid);
989 if (requested_child->status_pending_p
990 && !check_removed_breakpoint (requested_child))
991 event_child = requested_child;
992 }
993
994 if (event_child != NULL)
995 {
996 if (debug_threads)
997 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
998 lwpid_of (event_child), event_child->status_pending);
999 *wstat = event_child->status_pending;
1000 event_child->status_pending_p = 0;
1001 event_child->status_pending = 0;
1002 current_inferior = get_lwp_thread (event_child);
1003 return lwpid_of (event_child);
1004 }
1005
1006 /* We only enter this loop if no process has a pending wait status. Thus
1007 any action taken in response to a wait status inside this loop is
1008 responding as soon as we detect the status, not after any pending
1009 events. */
1010 while (1)
1011 {
1012 event_child = linux_wait_for_lwp (ptid, wstat, options);
1013
1014 if ((options & WNOHANG) && event_child == NULL)
1015 return 0;
1016
1017 if (event_child == NULL)
1018 error ("event from unknown child");
1019
1020 current_inferior = get_lwp_thread (event_child);
1021
1022 /* Check for thread exit. */
1023 if (! WIFSTOPPED (*wstat))
1024 {
1025 if (debug_threads)
1026 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1027
1028 /* If the last thread is exiting, just return. */
1029 if (last_thread_of_process_p (current_inferior))
1030 {
1031 if (debug_threads)
1032 fprintf (stderr, "LWP %ld is last lwp of process\n",
1033 lwpid_of (event_child));
1034 return lwpid_of (event_child);
1035 }
1036
1037 delete_lwp (event_child);
1038
1039 if (!non_stop)
1040 {
1041 current_inferior = (struct thread_info *) all_threads.head;
1042 if (debug_threads)
1043 fprintf (stderr, "Current inferior is now %ld\n",
1044 lwpid_of (get_thread_lwp (current_inferior)));
1045 }
1046 else
1047 {
1048 current_inferior = NULL;
1049 if (debug_threads)
1050 fprintf (stderr, "Current inferior is now <NULL>\n");
1051 }
1052
1053 /* If we were waiting for this particular child to do something...
1054 well, it did something. */
1055 if (requested_child != NULL)
1056 return lwpid_of (event_child);
1057
1058 /* Wait for a more interesting event. */
1059 continue;
1060 }
1061
1062 if (event_child->must_set_ptrace_flags)
1063 {
1064 ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
1065 0, PTRACE_O_TRACECLONE);
1066 event_child->must_set_ptrace_flags = 0;
1067 }
1068
1069 if (WIFSTOPPED (*wstat)
1070 && WSTOPSIG (*wstat) == SIGSTOP
1071 && event_child->stop_expected)
1072 {
1073 if (debug_threads)
1074 fprintf (stderr, "Expected stop.\n");
1075 event_child->stop_expected = 0;
1076 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
1077 continue;
1078 }
1079
1080 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1081 && *wstat >> 16 != 0)
1082 {
1083 handle_extended_wait (event_child, *wstat);
1084 continue;
1085 }
1086
1087 /* If GDB is not interested in this signal, don't stop other
1088 threads, and don't report it to GDB. Just resume the
1089 inferior right away. We do this for threading-related
1090 signals as well as any that GDB specifically requested we
1091 ignore. But never ignore SIGSTOP if we sent it ourselves,
1092 and do not ignore signals when stepping - they may require
1093 special handling to skip the signal handler. */
1094 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
1095 thread library? */
1096 if (WIFSTOPPED (*wstat)
1097 && !event_child->stepping
1098 && (
1099 #ifdef USE_THREAD_DB
1100 (current_process ()->private->thread_db_active
1101 && (WSTOPSIG (*wstat) == __SIGRTMIN
1102 || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
1103 ||
1104 #endif
1105 (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
1106 && (WSTOPSIG (*wstat) != SIGSTOP || !stopping_threads))))
1107 {
1108 siginfo_t info, *info_p;
1109
1110 if (debug_threads)
1111 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
1112 WSTOPSIG (*wstat), lwpid_of (event_child));
1113
1114 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
1115 info_p = &info;
1116 else
1117 info_p = NULL;
1118 linux_resume_one_lwp (event_child,
1119 event_child->stepping,
1120 WSTOPSIG (*wstat), info_p);
1121 continue;
1122 }
1123
1124 /* If this event was not handled above, and is not a SIGTRAP, report
1125 it. */
1126 if (!WIFSTOPPED (*wstat) || WSTOPSIG (*wstat) != SIGTRAP)
1127 return lwpid_of (event_child);
1128
1129 /* If this target does not support breakpoints, we simply report the
1130 SIGTRAP; it's of no concern to us. */
1131 if (the_low_target.get_pc == NULL)
1132 return lwpid_of (event_child);
1133
1134 stop_pc = get_stop_pc ();
1135
1136 /* bp_reinsert will only be set if we were single-stepping.
1137 Notice that we will resume the process after hitting
1138 a gdbserver breakpoint; single-stepping to/over one
1139 is not supported (yet). */
1140 if (event_child->bp_reinsert != 0)
1141 {
1142 if (debug_threads)
1143 fprintf (stderr, "Reinserted breakpoint.\n");
1144 reinsert_breakpoint (event_child->bp_reinsert);
1145 event_child->bp_reinsert = 0;
1146
1147 /* Clear the single-stepping flag and SIGTRAP as we resume. */
1148 linux_resume_one_lwp (event_child, 0, 0, NULL);
1149 continue;
1150 }
1151
1152 bp_status = check_breakpoints (stop_pc);
1153
1154 if (bp_status != 0)
1155 {
1156 if (debug_threads)
1157 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
1158
1159 /* We hit one of our own breakpoints. We mark it as a pending
1160 breakpoint, so that check_removed_breakpoint () will do the PC
1161 adjustment for us at the appropriate time. */
1162 event_child->pending_is_breakpoint = 1;
1163 event_child->pending_stop_pc = stop_pc;
1164
1165 /* We may need to put the breakpoint back. We continue in the event
1166 loop instead of simply replacing the breakpoint right away,
1167 in order to not lose signals sent to the thread that hit the
1168 breakpoint. Unfortunately this increases the window where another
1169 thread could sneak past the removed breakpoint. For the current
1170 use of server-side breakpoints (thread creation) this is
1171 acceptable; but it needs to be considered before this breakpoint
1172 mechanism can be used in more general ways. For some breakpoints
1173 it may be necessary to stop all other threads, but that should
1174 be avoided where possible.
1175
1176 If breakpoint_reinsert_addr is NULL, that means that we can
1177 use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint,
1178 mark it for reinsertion, and single-step.
1179
1180 Otherwise, call the target function to figure out where we need
1181 our temporary breakpoint, create it, and continue executing this
1182 process. */
1183
1184 /* NOTE: we're lifting breakpoints in non-stop mode. This
1185 is currently only used for thread event breakpoints, so
1186 it isn't that bad as long as we have PTRACE_EVENT_CLONE
1187 events. */
1188 if (bp_status == 2)
1189 /* No need to reinsert. */
1190 linux_resume_one_lwp (event_child, 0, 0, NULL);
1191 else if (the_low_target.breakpoint_reinsert_addr == NULL)
1192 {
1193 event_child->bp_reinsert = stop_pc;
1194 uninsert_breakpoint (stop_pc);
1195 linux_resume_one_lwp (event_child, 1, 0, NULL);
1196 }
1197 else
1198 {
1199 reinsert_breakpoint_by_bp
1200 (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
1201 linux_resume_one_lwp (event_child, 0, 0, NULL);
1202 }
1203
1204 continue;
1205 }
1206
1207 if (debug_threads)
1208 fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");
1209
1210 /* If we were single-stepping, we definitely want to report the
1211 SIGTRAP. Although the single-step operation has completed,
1212 do not clear the stepping flag yet; we need to check it
1213 in wait_for_sigstop. */
1214 if (event_child->stepping)
1215 return lwpid_of (event_child);
1216
1217 /* A SIGTRAP that we can't explain. It may have been a breakpoint.
1218 Check if it is a breakpoint, and if so mark the process information
1219 accordingly. This will handle both the necessary fiddling with the
1220 PC on decr_pc_after_break targets and suppressing extra threads
1221 hitting a breakpoint if two hit it at once and then GDB removes it
1222 after the first is reported. Arguably it would be better to report
1223 multiple threads hitting breakpoints simultaneously, but the current
1224 remote protocol does not allow this. */
1225 if ((*the_low_target.breakpoint_at) (stop_pc))
1226 {
1227 event_child->pending_is_breakpoint = 1;
1228 event_child->pending_stop_pc = stop_pc;
1229 }
1230
1231 return lwpid_of (event_child);
1232 }
1233
1234 /* NOTREACHED */
1235 return 0;
1236 }
1237
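/* Like linux_wait_for_event_1, but also handle the case where PTID names a
whole process (tgid): wait for any child, and record events from LWPs of
other processes as pending statuses to be reported later.  */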
1238 static int
1239 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1240 {
1241 ptid_t wait_ptid;
1242
1243 if (ptid_is_pid (ptid))
1244 {
1245 /* A request to wait for a specific tgid. This is not possible
1246 with waitpid, so instead, we wait for any child, and leave
1247 children we're not interested in right now with a pending
1248 status to report later. */
1249 wait_ptid = minus_one_ptid;
1250 }
1251 else
1252 wait_ptid = ptid;
1253
1254 while (1)
1255 {
1256 int event_pid;
1257
1258 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1259
1260 if (event_pid > 0
1261 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1262 {
1263 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1264
1265 if (! WIFSTOPPED (*wstat))
1266 mark_lwp_dead (event_child, *wstat);
1267 else
1268 {
1269 event_child->status_pending_p = 1;
1270 event_child->status_pending = *wstat;
1271 }
1272 }
1273 else
1274 return event_pid;
1275 }
1276 }
1277
1278 /* Wait for the process; return its status. */
1279
1280 static ptid_t
1281 linux_wait_1 (ptid_t ptid,
1282 struct target_waitstatus *ourstatus, int target_options)
1283 {
1284 int w;
1285 struct thread_info *thread = NULL;
1286 struct lwp_info *lwp = NULL;
1287 int options;
1288 int pid;
1289
1290 /* Translate generic target options into linux options. */
1291 options = __WALL;
1292 if (target_options & TARGET_WNOHANG)
1293 options |= WNOHANG;
1294
1295 retry:
1296 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1297
1298 /* If we were only supposed to resume one thread, only wait for
1299 that thread - if it's still alive. If it died, however - which
1300 can happen if we're coming from the thread death case below -
1301 then we need to make sure we restart the other threads. We could
1302 pick a thread at random or restart all; restarting all is less
1303 arbitrary. */
1304 if (!non_stop
1305 && !ptid_equal (cont_thread, null_ptid)
1306 && !ptid_equal (cont_thread, minus_one_ptid))
1307 {
1308 thread = (struct thread_info *) find_inferior_id (&all_threads,
1309 cont_thread);
1310
1311 /* No stepping, no signal - unless one is pending already, of course. */
1312 if (thread == NULL)
1313 {
1314 struct thread_resume resume_info;
1315 resume_info.thread = minus_one_ptid;
1316 resume_info.kind = resume_continue;
1317 resume_info.sig = 0;
1318 linux_resume (&resume_info, 1);
1319 }
1320 else
1321 ptid = cont_thread;
1322 }
1323
1324 pid = linux_wait_for_event (ptid, &w, options);
1325 if (pid == 0) /* only if TARGET_WNOHANG */
1326 return null_ptid;
1327
1328 lwp = get_thread_lwp (current_inferior);
1329
1330 /* If we are waiting for a particular child, and it exited,
1331 linux_wait_for_event will return its exit status. Similarly if
1332 the last child exited. If this is not the last child, however,
1333 do not report it as exited until there is a 'thread exited' response
1334 available in the remote protocol. Instead, just wait for another event.
1335 This should be safe, because if the thread crashed we will already
1336 have reported the termination signal to GDB; that should stop any
1337 in-progress stepping operations, etc.
1338
1339 Report the exit status of the last thread to exit. This matches
1340 LinuxThreads' behavior. */
1341
1342 if (last_thread_of_process_p (current_inferior))
1343 {
1344 if (WIFEXITED (w) || WIFSIGNALED (w))
1345 {
1346 int pid = pid_of (lwp);
1347 struct process_info *process = find_process_pid (pid);
1348
1349 delete_lwp (lwp);
1350 linux_remove_process (process);
1351
1352 current_inferior = NULL;
1353
1354 if (WIFEXITED (w))
1355 {
1356 ourstatus->kind = TARGET_WAITKIND_EXITED;
1357 ourstatus->value.integer = WEXITSTATUS (w);
1358
1359 if (debug_threads)
1360 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
1361 }
1362 else
1363 {
1364 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
1365 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
1366
1367 if (debug_threads)
1368 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
1369
1370 }
1371
1372 return pid_to_ptid (pid);
1373 }
1374 }
1375 else
1376 {
1377 if (!WIFSTOPPED (w))
1378 goto retry;
1379 }
1380
1381 /* In all-stop, stop all threads. Be careful to only do this if
1382 we're about to report an event to GDB. */
1383 if (!non_stop)
1384 stop_all_lwps ();
1385
1386 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1387
1388 if (lwp->suspended && WSTOPSIG (w) == SIGSTOP)
1389 {
1390 /* A thread that has been requested to stop by GDB with vCont;t,
1391 and it stopped cleanly, so report as SIG0. The use of
1392 SIGSTOP is an implementation detail. */
1393 ourstatus->value.sig = TARGET_SIGNAL_0;
1394 }
1395 else if (lwp->suspended && WSTOPSIG (w) != SIGSTOP)
1396 {
1397 /* A thread that has been requested to stop by GDB with vCont;t,
1398 but it stopped for other reasons. Set stop_expected so the
1399 pending SIGSTOP is ignored and the LWP is resumed. */
1400 lwp->stop_expected = 1;
1401 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1402 }
1403 else
1404 {
1405 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1406 }
1407
1408 if (debug_threads)
1409 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
1410 target_pid_to_str (lwp->head.id),
1411 ourstatus->kind,
1412 ourstatus->value.sig);
1413
1414 return lwp->head.id;
1415 }
1416
1417 /* Get rid of any pending event in the pipe. */
1418 static void
1419 async_file_flush (void)
1420 {
1421 int ret;
1422 char buf;
1423
1424 do
1425 ret = read (linux_event_pipe[0], &buf, 1);
1426 while (ret >= 0 || (ret == -1 && errno == EINTR));
1427 }
1428
1429 /* Put something in the pipe, so the event loop wakes up. */
1430 static void
1431 async_file_mark (void)
1432 {
1433 int ret;
1434
1435 async_file_flush ();
1436
1437 do
1438 ret = write (linux_event_pipe[1], "+", 1);
1439 while (ret == 0 || (ret == -1 && errno == EINTR));
1440
1441 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1442 be awakened anyway. */
1443 }
1444
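/* Flush the async event pipe, wait for an event via linux_wait_1, and
re-mark the pipe if more events may still be pending.  */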
1445 static ptid_t
1446 linux_wait (ptid_t ptid,
1447 struct target_waitstatus *ourstatus, int target_options)
1448 {
1449 ptid_t event_ptid;
1450
1451 if (debug_threads)
1452 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1453
1454 /* Flush the async file first. */
1455 if (target_is_async_p ())
1456 async_file_flush ();
1457
1458 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1459
1460 /* If at least one stop was reported, there may be more. A single
1461 SIGCHLD can signal more than one child stop. */
1462 if (target_is_async_p ()
1463 && (target_options & TARGET_WNOHANG) != 0
1464 && !ptid_equal (event_ptid, null_ptid))
1465 async_file_mark ();
1466
1467 return event_ptid;
1468 }
1469
1470 /* Send a signal to an LWP. For LinuxThreads, kill is enough; however, if
1471 thread groups are in use, we need to use tkill. */
1472
1473 static int
1474 kill_lwp (unsigned long lwpid, int signo)
1475 {
1476 static int tkill_failed;
1477
1478 errno = 0;
1479
1480 #ifdef SYS_tkill
1481 if (!tkill_failed)
1482 {
1483 int ret = syscall (SYS_tkill, lwpid, signo);
1484 if (errno != ENOSYS)
1485 return ret;
1486 errno = 0;
1487 tkill_failed = 1;
1488 }
1489 #endif
1490
1491 return kill (lwpid, signo);
1492 }
1493
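/* Send a SIGSTOP to the LWP specified by ENTRY, unless it is already
stopped.  If a SIGSTOP is already pending, just clear stop_expected so that
the stop gets reported.  */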
1494 static void
1495 send_sigstop (struct inferior_list_entry *entry)
1496 {
1497 struct lwp_info *lwp = (struct lwp_info *) entry;
1498 int pid;
1499
1500 if (lwp->stopped)
1501 return;
1502
1503 pid = lwpid_of (lwp);
1504
1505 /* If we already have a pending stop signal for this process, don't
1506 send another. */
1507 if (lwp->stop_expected)
1508 {
1509 if (debug_threads)
1510 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
1511
1512 /* We clear the stop_expected flag so that wait_for_sigstop
1513 will receive the SIGSTOP event (instead of silently resuming and
1514 waiting again). It'll be reset below. */
1515 lwp->stop_expected = 0;
1516 return;
1517 }
1518
1519 if (debug_threads)
1520 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
1521
1522 kill_lwp (pid, SIGSTOP);
1523 }
1524
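/* Record that LWP died with wait status WSTAT, so that its exit status can
be reported later and no further attempt is made to stop or resume it.  */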
1525 static void
1526 mark_lwp_dead (struct lwp_info *lwp, int wstat)
1527 {
1528 /* It's dead, really. */
1529 lwp->dead = 1;
1530
1531 /* Store the exit status for later. */
1532 lwp->status_pending_p = 1;
1533 lwp->status_pending = wstat;
1534
1535 /* So that check_removed_breakpoint doesn't try to figure out if
1536 this is stopped at a breakpoint. */
1537 lwp->pending_is_breakpoint = 0;
1538
1539 /* Prevent trying to stop it. */
1540 lwp->stopped = 1;
1541
1542 /* No further stops are expected from a dead lwp. */
1543 lwp->stop_expected = 0;
1544 }
1545
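/* Wait for the LWP specified by ENTRY to stop with the SIGSTOP we sent it.
If it stops with a different signal first, record that signal as pending;
if it exits instead, mark it dead.  */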
1546 static void
1547 wait_for_sigstop (struct inferior_list_entry *entry)
1548 {
1549 struct lwp_info *lwp = (struct lwp_info *) entry;
1550 struct thread_info *saved_inferior;
1551 int wstat;
1552 ptid_t saved_tid;
1553 ptid_t ptid;
1554
1555 if (lwp->stopped)
1556 return;
1557
1558 saved_inferior = current_inferior;
1559 if (saved_inferior != NULL)
1560 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
1561 else
1562 saved_tid = null_ptid; /* avoid bogus unused warning */
1563
1564 ptid = lwp->head.id;
1565
1566 linux_wait_for_event (ptid, &wstat, __WALL);
1567
1568 /* If we stopped with a non-SIGSTOP signal, save it for later
1569 and record the pending SIGSTOP. If the process exited, just
1570 return. */
1571 if (WIFSTOPPED (wstat)
1572 && WSTOPSIG (wstat) != SIGSTOP)
1573 {
1574 if (debug_threads)
1575 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
1576 lwpid_of (lwp), wstat);
1577
1578 /* Do not leave a pending single-step finish to be reported to
1579 the client. The client will give us a new action for this
1580 thread, possibly a continue request --- otherwise, the client
1581 would consider this pending SIGTRAP reported later a spurious
1582 signal. */
1583 if (WSTOPSIG (wstat) == SIGTRAP
1584 && lwp->stepping
1585 && !linux_stopped_by_watchpoint ())
1586 {
1587 if (debug_threads)
1588 fprintf (stderr, " single-step SIGTRAP ignored\n");
1589 }
1590 else
1591 {
1592 lwp->status_pending_p = 1;
1593 lwp->status_pending = wstat;
1594 }
1595 lwp->stop_expected = 1;
1596 }
1597 else if (!WIFSTOPPED (wstat))
1598 {
1599 if (debug_threads)
1600 fprintf (stderr, "Process %ld exited while stopping LWPs\n",
1601 lwpid_of (lwp));
1602
1603 /* Leave this status pending for the next time we're able to
1604 report it. In the mean time, we'll report this lwp as dead
1605 to GDB, so GDB doesn't try to read registers and memory from
1606 it. */
1607 mark_lwp_dead (lwp, wstat);
1608 }
1609
1610 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
1611 current_inferior = saved_inferior;
1612 else
1613 {
1614 if (debug_threads)
1615 fprintf (stderr, "Previously current thread died.\n");
1616
1617 if (non_stop)
1618 {
1619 /* We can't change the current inferior behind GDB's back,
1620 otherwise, a subsequent command may apply to the wrong
1621 process. */
1622 current_inferior = NULL;
1623 }
1624 else
1625 {
1626 /* Set a valid thread as current. */
1627 set_desired_inferior (0);
1628 }
1629 }
1630 }
1631
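/* Stop all LWPs: send each one a SIGSTOP and wait for each to report its
stop.  */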
1632 static void
1633 stop_all_lwps (void)
1634 {
1635 stopping_threads = 1;
1636 for_each_inferior (&all_lwps, send_sigstop);
1637 for_each_inferior (&all_lwps, wait_for_sigstop);
1638 stopping_threads = 0;
1639 }
1640
1641 /* Resume execution of the inferior process.
1642 If STEP is nonzero, single-step it.
1643 If SIGNAL is nonzero, give it that signal. */
1644
1645 static void
1646 linux_resume_one_lwp (struct lwp_info *lwp,
1647 int step, int signal, siginfo_t *info)
1648 {
1649 struct thread_info *saved_inferior;
1650
1651 if (lwp->stopped == 0)
1652 return;
1653
1654 /* If we have pending signals or status, and a new signal, enqueue the
1655 signal. Also enqueue the signal if we are waiting to reinsert a
1656 breakpoint; it will be picked up again below. */
1657 if (signal != 0
1658 && (lwp->status_pending_p || lwp->pending_signals != NULL
1659 || lwp->bp_reinsert != 0))
1660 {
1661 struct pending_signals *p_sig;
1662 p_sig = xmalloc (sizeof (*p_sig));
1663 p_sig->prev = lwp->pending_signals;
1664 p_sig->signal = signal;
1665 if (info == NULL)
1666 memset (&p_sig->info, 0, sizeof (siginfo_t));
1667 else
1668 memcpy (&p_sig->info, info, sizeof (siginfo_t));
1669 lwp->pending_signals = p_sig;
1670 }
1671
1672 if (lwp->status_pending_p && !check_removed_breakpoint (lwp))
1673 return;
1674
1675 saved_inferior = current_inferior;
1676 current_inferior = get_lwp_thread (lwp);
1677
1678 if (debug_threads)
1679 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
1680 lwpid_of (lwp), step ? "step" : "continue", signal,
1681 lwp->stop_expected ? "expected" : "not expected");
1682
1683 /* This bit needs some thinking about. If we get a signal that
1684 we must report while a single-step reinsert is still pending,
1685 we often end up resuming the thread. It might be better to
1686 (ew) allow a stack of pending events; then we could be sure that
1687 the reinsert happened right away and not lose any signals.
1688
1689 Making this stack would also shrink the window in which breakpoints are
1690 uninserted (see comment in linux_wait_for_lwp) but not enough for
1691 complete correctness, so it won't solve that problem. It may be
1692 worthwhile just to solve this one, however. */
1693 if (lwp->bp_reinsert != 0)
1694 {
1695 if (debug_threads)
1696 fprintf (stderr, " pending reinsert at %08lx", (long)lwp->bp_reinsert);
1697 if (step == 0)
1698 fprintf (stderr, "BAD - reinserting but not stepping.\n");
1699 step = 1;
1700
1701 /* Postpone any pending signal. It was enqueued above. */
1702 signal = 0;
1703 }
1704
1705 check_removed_breakpoint (lwp);
1706
1707 if (debug_threads && the_low_target.get_pc != NULL)
1708 {
1709 fprintf (stderr, " ");
1710 (*the_low_target.get_pc) ();
1711 }
1712
1713 /* If we have pending signals, consume one unless we are trying to reinsert
1714 a breakpoint. */
1715 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
1716 {
1717 struct pending_signals **p_sig;
1718
1719 p_sig = &lwp->pending_signals;
1720 while ((*p_sig)->prev != NULL)
1721 p_sig = &(*p_sig)->prev;
1722
1723 signal = (*p_sig)->signal;
1724 if ((*p_sig)->info.si_signo != 0)
1725 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1726
1727 free (*p_sig);
1728 *p_sig = NULL;
1729 }
1730
1731 regcache_invalidate_one ((struct inferior_list_entry *)
1732 get_lwp_thread (lwp));
1733 errno = 0;
1734 lwp->stopped = 0;
1735 lwp->stepping = step;
1736 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0, signal);
1737
1738 current_inferior = saved_inferior;
1739 if (errno)
1740 {
1741 /* ESRCH from ptrace either means that the thread was already
1742 running (an error) or that it is gone (a race condition). If
1743 it's gone, we will get a notification the next time we wait,
1744 so we can ignore the error. We could differentiate these
1745 two, but it's tricky without waiting; the thread still exists
1746 as a zombie, so sending it signal 0 would succeed. So just
1747 ignore ESRCH. */
1748 if (errno == ESRCH)
1749 return;
1750
1751 perror_with_name ("ptrace");
1752 }
1753 }
1754
1755 struct thread_resume_array
1756 {
1757 struct thread_resume *resume;
1758 size_t n;
1759 };
1760
1761 /* This function is called once per thread. We look up the thread
1762 in RESUME_PTR, and mark the thread with a pointer to the appropriate
1763 resume request.
1764
1765 This algorithm is O(threads * resume elements), but resume elements
1766 is small (and will remain small at least until GDB supports thread
1767 suspension). */
1768 static int
1769 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
1770 {
1771 struct lwp_info *lwp;
1772 struct thread_info *thread;
1773 int ndx;
1774 struct thread_resume_array *r;
1775
1776 thread = (struct thread_info *) entry;
1777 lwp = get_thread_lwp (thread);
1778 r = arg;
1779
1780 for (ndx = 0; ndx < r->n; ndx++)
1781 {
1782 ptid_t ptid = r->resume[ndx].thread;
1783 if (ptid_equal (ptid, minus_one_ptid)
1784 || ptid_equal (ptid, entry->id)
1785 || (ptid_is_pid (ptid)
1786 && (ptid_get_pid (ptid) == pid_of (lwp)))
1787 || (ptid_get_lwp (ptid) == -1
1788 && (ptid_get_pid (ptid) == pid_of (lwp))))
1789 {
1790 lwp->resume = &r->resume[ndx];
1791 return 0;
1792 }
1793 }
1794
1795 /* No resume action for this thread. */
1796 lwp->resume = NULL;
1797
1798 return 0;
1799 }
1800
1801
1802 /* Set *FLAG_P if this lwp has an interesting status pending. */
1803 static int
1804 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1805 {
1806 struct lwp_info *lwp = (struct lwp_info *) entry;
1807
1808 /* LWPs which will not be resumed are not interesting, because
1809 we might not wait for them next time through linux_wait. */
1810 if (lwp->resume == NULL)
1811 return 0;
1812
1813 /* If this thread has a removed breakpoint, we won't have any
1814 events to report later, so check now. check_removed_breakpoint
1815 may clear status_pending_p. We avoid calling check_removed_breakpoint
1816 for any thread that we are not otherwise going to resume - this
1817 lets us preserve stopped status when two threads hit a breakpoint.
1818 GDB removes the breakpoint to single-step a particular thread
1819 past it, then re-inserts it and resumes all threads. We want
1820 to report the second thread without resuming it in the interim. */
1821 if (lwp->status_pending_p)
1822 check_removed_breakpoint (lwp);
1823
1824 if (lwp->status_pending_p)
1825 * (int *) flag_p = 1;
1826
1827 return 0;
1828 }
1829
1830 /* This function is called once per thread. We check the thread's resume
1831 request, which will tell us whether to resume, step, or leave the thread
1832 stopped; and what signal, if any, it should be sent.
1833
1834 For threads which we aren't explicitly told otherwise, we preserve
1835 the stepping flag; this is used for stepping over gdbserver-placed
1836 breakpoints.
1837
1838 If pending_flags was set in any thread, we queue any needed
1839 signals, since we won't actually resume. We already have a pending
1840 event to report, so we don't need to preserve any step requests;
1841 they should be re-issued if necessary. */
1842
1843 static int
1844 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
1845 {
1846 struct lwp_info *lwp;
1847 struct thread_info *thread;
1848 int step;
1849 int pending_flag = * (int *) arg;
1850
1851 thread = (struct thread_info *) entry;
1852 lwp = get_thread_lwp (thread);
1853
1854 if (lwp->resume == NULL)
1855 return 0;
1856
1857 if (lwp->resume->kind == resume_stop)
1858 {
1859 if (debug_threads)
1860 fprintf (stderr, "suspending LWP %ld\n", lwpid_of (lwp));
1861
1862 if (!lwp->stopped)
1863 {
1864 if (debug_threads)
1865 fprintf (stderr, "running -> suspending LWP %ld\n", lwpid_of (lwp));
1866
1867 lwp->suspended = 1;
1868 send_sigstop (&lwp->head);
1869 }
1870 else
1871 {
1872 if (debug_threads)
1873 {
1874 if (lwp->suspended)
1875 fprintf (stderr, "already stopped/suspended LWP %ld\n",
1876 lwpid_of (lwp));
1877 else
1878 fprintf (stderr, "already stopped/not suspended LWP %ld\n",
1879 lwpid_of (lwp));
1880 }
1881
1882 /* Make sure we leave the LWP suspended, so we don't try to
1883 resume it without GDB telling us to. FIXME: The LWP may
1884 have been stopped in an internal event that was not meant
1885 to be notified back to GDB (e.g., gdbserver breakpoint),
1886 so we should be reporting a stop event in that case
1887 too. */
1888 lwp->suspended = 1;
1889 }
1890
1891 /* For stop requests, we're done. */
1892 lwp->resume = NULL;
1893 return 0;
1894 }
1895 else
1896 lwp->suspended = 0;
1897
1898 /* If this thread, which is about to be resumed, has a pending status,
1899 then don't resume it - we can just report the pending status.  Make
1900 sure to queue any signals that would otherwise be sent.  In all-stop
1901 mode, this decision is based on whether *any* thread has a pending
1902 status. */
1903 if (non_stop)
1904 resume_status_pending_p (&lwp->head, &pending_flag);
1905
1906 if (!pending_flag)
1907 {
1908 if (debug_threads)
1909 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
1910
1911 if (ptid_equal (lwp->resume->thread, minus_one_ptid)
1912 && lwp->stepping
1913 && lwp->pending_is_breakpoint)
1914 step = 1;
1915 else
1916 step = (lwp->resume->kind == resume_step);
1917
1918 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
1919 }
1920 else
1921 {
1922 if (debug_threads)
1923 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
1924
1925 /* If we have a new signal, enqueue the signal. */
1926 if (lwp->resume->sig != 0)
1927 {
1928 struct pending_signals *p_sig;
1929 p_sig = xmalloc (sizeof (*p_sig));
1930 p_sig->prev = lwp->pending_signals;
1931 p_sig->signal = lwp->resume->sig;
1932 memset (&p_sig->info, 0, sizeof (siginfo_t));
1933
1934 /* If this is the same signal we were previously stopped by,
1935 make sure to queue its siginfo. We can ignore the return
1936 value of ptrace; if it fails, we'll skip
1937 PTRACE_SETSIGINFO. */
1938 if (WIFSTOPPED (lwp->last_status)
1939 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
1940 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
1941
1942 lwp->pending_signals = p_sig;
1943 }
1944 }
1945
1946 lwp->resume = NULL;
1947 return 0;
1948 }
1949
1950 static void
1951 linux_resume (struct thread_resume *resume_info, size_t n)
1952 {
1953 int pending_flag;
1954 struct thread_resume_array array = { resume_info, n };
1955
1956 find_inferior (&all_threads, linux_set_resume_request, &array);
1957
1958 /* If there is a thread which would otherwise be resumed, which
1959 has a pending status, then don't resume any threads - we can just
1960 report the pending status. Make sure to queue any signals
1961 that would otherwise be sent. In non-stop mode, we'll apply this
1962 logic to each thread individually. */
1963 pending_flag = 0;
1964 if (!non_stop)
1965 find_inferior (&all_lwps, resume_status_pending_p, &pending_flag);
1966
1967 if (debug_threads)
1968 {
1969 if (pending_flag)
1970 fprintf (stderr, "Not resuming, pending status\n");
1971 else
1972 fprintf (stderr, "Resuming, no pending status\n");
1973 }
1974
1975 find_inferior (&all_threads, linux_resume_one_thread, &pending_flag);
1976 }
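/* Editor's illustrative sketch (not part of the original sources):
   how a caller might fill in a thread_resume array for a request like
   GDB's "vCont;s:<thread>;c" -- step one thread and continue everything
   else.  linux_set_resume_request matches each thread against the array
   in order, so the specific entry comes first and the minus_one_ptid
   wildcard catches every other thread.  The ptid_build constructor and
   the resume_continue enumerator used below are assumptions; only the
   matching rules above are taken from this file.  */
#if 0 /* Example only; not compiled.  */
static void
example_step_one_continue_rest (int pid, long lwpid)
{
  struct thread_resume resume_info[2];

  /* Step exactly the requested thread, delivering no signal.  */
  resume_info[0].thread = ptid_build (pid, lwpid, 0);
  resume_info[0].kind = resume_step;
  resume_info[0].sig = 0;

  /* Let every other thread continue.  */
  resume_info[1].thread = minus_one_ptid;
  resume_info[1].kind = resume_continue;
  resume_info[1].sig = 0;

  linux_resume (resume_info, 2);
}
#endif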
1977
1978 #ifdef HAVE_LINUX_USRREGS
1979
1980 int
1981 register_addr (int regnum)
1982 {
1983 int addr;
1984
1985 if (regnum < 0 || regnum >= the_low_target.num_regs)
1986 error ("Invalid register number %d.", regnum);
1987
1988 addr = the_low_target.regmap[regnum];
1989
1990 return addr;
1991 }
1992
1993 /* Fetch one register. */
1994 static void
1995 fetch_register (int regno)
1996 {
1997 CORE_ADDR regaddr;
1998 int i, size;
1999 char *buf;
2000 int pid;
2001
2002 if (regno >= the_low_target.num_regs)
2003 return;
2004 if ((*the_low_target.cannot_fetch_register) (regno))
2005 return;
2006
2007 regaddr = register_addr (regno);
2008 if (regaddr == -1)
2009 return;
2010
2011 pid = lwpid_of (get_thread_lwp (current_inferior));
2012 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2013 & - sizeof (PTRACE_XFER_TYPE));
2014 buf = alloca (size);
2015 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2016 {
2017 errno = 0;
2018 *(PTRACE_XFER_TYPE *) (buf + i) =
2019 ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) regaddr, 0);
2020 regaddr += sizeof (PTRACE_XFER_TYPE);
2021 if (errno != 0)
2022 {
2023 /* Warning, not error, in case we are attached; sometimes the
2024 kernel doesn't let us at the registers. */
2025 char *err = strerror (errno);
2026 char *msg = alloca (strlen (err) + 128);
2027 sprintf (msg, "reading register %d: %s", regno, err);
2028 error ("%s", msg);
2029 goto error_exit;
2030 }
2031 }
2032
2033 if (the_low_target.supply_ptrace_register)
2034 the_low_target.supply_ptrace_register (regno, buf);
2035 else
2036 supply_register (regno, buf);
2037
2038 error_exit:;
2039 }
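/* Editor's note: the size computation above rounds the register size up
   to a whole number of PTRACE_XFER_TYPE words, because PTRACE_PEEKUSER
   always transfers full words.  The idiom (x + n - 1) & -n is the usual
   round-up for a power-of-two n: with 8-byte ptrace words a 10-byte
   register is fetched as 16 bytes (two words) and a 4-byte register as
   8 bytes (one word).  A standalone sketch of the same expression:  */
#if 0 /* Example only; not compiled.  */
static int
example_round_up_to_xfer_words (int nbytes)
{
  return (nbytes + sizeof (PTRACE_XFER_TYPE) - 1)
         & -sizeof (PTRACE_XFER_TYPE);
}
#endif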
2040
2041 /* Fetch all registers, or just one, from the child process. */
2042 static void
2043 usr_fetch_inferior_registers (int regno)
2044 {
2045 if (regno == -1 || regno == 0)
2046 for (regno = 0; regno < the_low_target.num_regs; regno++)
2047 fetch_register (regno);
2048 else
2049 fetch_register (regno);
2050 }
2051
2052 /* Store our register values back into the inferior.
2053 If REGNO is -1, do this for all registers.
2054 Otherwise, REGNO specifies which register (so we can save time). */
2055 static void
2056 usr_store_inferior_registers (int regno)
2057 {
2058 CORE_ADDR regaddr;
2059 int i, size;
2060 char *buf;
2061 int pid;
2062
2063 if (regno >= 0)
2064 {
2065 if (regno >= the_low_target.num_regs)
2066 return;
2067
2068 if ((*the_low_target.cannot_store_register) (regno) == 1)
2069 return;
2070
2071 regaddr = register_addr (regno);
2072 if (regaddr == -1)
2073 return;
2074 errno = 0;
2075 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2076 & - sizeof (PTRACE_XFER_TYPE);
2077 buf = alloca (size);
2078 memset (buf, 0, size);
2079
2080 if (the_low_target.collect_ptrace_register)
2081 the_low_target.collect_ptrace_register (regno, buf);
2082 else
2083 collect_register (regno, buf);
2084
2085 pid = lwpid_of (get_thread_lwp (current_inferior));
2086 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2087 {
2088 errno = 0;
2089 ptrace (PTRACE_POKEUSER, pid, (PTRACE_ARG3_TYPE) regaddr,
2090 *(PTRACE_XFER_TYPE *) (buf + i));
2091 if (errno != 0)
2092 {
2093 /* At this point, ESRCH should mean the process is
2094 already gone, in which case we simply ignore attempts
2095 to change its registers. See also the related
2096 comment in linux_resume_one_lwp. */
2097 if (errno == ESRCH)
2098 return;
2099
2100 if ((*the_low_target.cannot_store_register) (regno) == 0)
2101 {
2102 char *err = strerror (errno);
2103 char *msg = alloca (strlen (err) + 128);
2104 sprintf (msg, "writing register %d: %s",
2105 regno, err);
2106 error ("%s", msg);
2107 return;
2108 }
2109 }
2110 regaddr += sizeof (PTRACE_XFER_TYPE);
2111 }
2112 }
2113 else
2114 for (regno = 0; regno < the_low_target.num_regs; regno++)
2115 usr_store_inferior_registers (regno);
2116 }
2117 #endif /* HAVE_LINUX_USRREGS */
2118
2119
2120
2121 #ifdef HAVE_LINUX_REGSETS
2122
2123 static int
2124 regsets_fetch_inferior_registers ()
2125 {
2126 struct regset_info *regset;
2127 int saw_general_regs = 0;
2128 int pid;
2129
2130 regset = target_regsets;
2131
2132 pid = lwpid_of (get_thread_lwp (current_inferior));
2133 while (regset->size >= 0)
2134 {
2135 void *buf;
2136 int res;
2137
2138 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2139 {
2140 regset ++;
2141 continue;
2142 }
2143
2144 buf = xmalloc (regset->size);
2145 #ifndef __sparc__
2146 res = ptrace (regset->get_request, pid, 0, buf);
2147 #else
2148 res = ptrace (regset->get_request, pid, buf, 0);
2149 #endif
2150 if (res < 0)
2151 {
2152 if (errno == EIO)
2153 {
2154 /* If we get EIO on a regset, do not try it again for
2155 this process. */
2156 disabled_regsets[regset - target_regsets] = 1;
2157 free (buf);
2158 continue;
2159 }
2160 else
2161 {
2162 char s[256];
2163 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
2164 pid);
2165 perror (s);
2166 }
2167 }
2168 else if (regset->type == GENERAL_REGS)
2169 saw_general_regs = 1;
2170 regset->store_function (buf);
2171 regset ++;
2172 free (buf);
2173 }
2174 if (saw_general_regs)
2175 return 0;
2176 else
2177 return 1;
2178 }
2179
2180 static int
2181 regsets_store_inferior_registers ()
2182 {
2183 struct regset_info *regset;
2184 int saw_general_regs = 0;
2185 int pid;
2186
2187 regset = target_regsets;
2188
2189 pid = lwpid_of (get_thread_lwp (current_inferior));
2190 while (regset->size >= 0)
2191 {
2192 void *buf;
2193 int res;
2194
2195 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2196 {
2197 regset ++;
2198 continue;
2199 }
2200
2201 buf = xmalloc (regset->size);
2202
2203 /* First fill the buffer with the current register set contents,
2204 in case there are any items in the kernel's regset that are
2205 not in gdbserver's regcache. */
2206 #ifndef __sparc__
2207 res = ptrace (regset->get_request, pid, 0, buf);
2208 #else
2209 res = ptrace (regset->get_request, pid, buf, 0);
2210 #endif
2211
2212 if (res == 0)
2213 {
2214 /* Then overlay our cached registers on that. */
2215 regset->fill_function (buf);
2216
2217 /* Only now do we write the register set. */
2218 #ifndef __sparc__
2219 res = ptrace (regset->set_request, pid, 0, buf);
2220 #else
2221 res = ptrace (regset->set_request, pid, buf, 0);
2222 #endif
2223 }
2224
2225 if (res < 0)
2226 {
2227 if (errno == EIO)
2228 {
2229 /* If we get EIO on a regset, do not try it again for
2230 this process. */
2231 disabled_regsets[regset - target_regsets] = 1;
2232 free (buf);
2233 continue;
2234 }
2235 else if (errno == ESRCH)
2236 {
2237 /* At this point, ESRCH should mean the process is
2238 already gone, in which case we simply ignore attempts
2239 to change its registers. See also the related
2240 comment in linux_resume_one_lwp. */
2241 free (buf);
2242 return 0;
2243 }
2244 else
2245 {
2246 perror ("Warning: ptrace(regsets_store_inferior_registers)");
2247 }
2248 }
2249 else if (regset->type == GENERAL_REGS)
2250 saw_general_regs = 1;
2251 regset ++;
2252 free (buf);
2253 }
2254 if (saw_general_regs)
2255 return 0;
2256 else
2257 return 1;
2258 return 0;
2259 }
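/* Editor's illustrative sketch: target_regsets is supplied by the
   architecture-specific file (linux-x86-low.c and friends), and the
   loops above walk it until they reach the terminating entry, whose
   size is negative.  A hypothetical general-register entry follows;
   designated initializers are used so no assumption is made about the
   field order of struct regset_info, and elf_gregset_t,
   x86_fill_gregset and x86_store_gregset are placeholder names.  Note
   that the store path above is deliberately read-modify-write: it
   fetches the kernel's current regset, overlays the regcache contents
   via fill_function, and only then writes the buffer back, so kernel
   fields that gdbserver does not track are preserved.  */
#if 0 /* Example only; not compiled.  */
static struct regset_info example_regsets[] = {
  { .get_request = PTRACE_GETREGS,
    .set_request = PTRACE_SETREGS,
    .size = sizeof (elf_gregset_t),
    .type = GENERAL_REGS,
    .fill_function = x86_fill_gregset,   /* regcache -> buffer */
    .store_function = x86_store_gregset  /* buffer -> regcache */ },
  { .size = -1 }  /* Terminator.  */
};
#endif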
2260
2261 #endif /* HAVE_LINUX_REGSETS */
2262
2263
2264 void
2265 linux_fetch_registers (int regno)
2266 {
2267 #ifdef HAVE_LINUX_REGSETS
2268 if (regsets_fetch_inferior_registers () == 0)
2269 return;
2270 #endif
2271 #ifdef HAVE_LINUX_USRREGS
2272 usr_fetch_inferior_registers (regno);
2273 #endif
2274 }
2275
2276 void
2277 linux_store_registers (int regno)
2278 {
2279 #ifdef HAVE_LINUX_REGSETS
2280 if (regsets_store_inferior_registers () == 0)
2281 return;
2282 #endif
2283 #ifdef HAVE_LINUX_USRREGS
2284 usr_store_inferior_registers (regno);
2285 #endif
2286 }
2287
2288
2289 /* Copy LEN bytes from inferior's memory starting at MEMADDR
2290 to debugger memory starting at MYADDR.  Returns 0 on success, or the value of errno on failure. */
2291
2292 static int
2293 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
2294 {
2295 register int i;
2296 /* Round starting address down to longword boundary. */
2297 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2298 /* Round ending address up; get number of longwords that makes. */
2299 register int count
2300 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
2301 / sizeof (PTRACE_XFER_TYPE);
2302 /* Allocate buffer of that many longwords. */
2303 register PTRACE_XFER_TYPE *buffer
2304 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2305 int fd;
2306 char filename[64];
2307 int pid = lwpid_of (get_thread_lwp (current_inferior));
2308
2309 /* Try using /proc. Don't bother for one word. */
2310 if (len >= 3 * sizeof (long))
2311 {
2312 /* We could keep this file open and cache it - possibly one per
2313 thread. That requires some juggling, but is even faster. */
2314 sprintf (filename, "/proc/%d/mem", pid);
2315 fd = open (filename, O_RDONLY | O_LARGEFILE);
2316 if (fd == -1)
2317 goto no_proc;
2318
2319 /* If pread64 is available, use it. It's faster if the kernel
2320 supports it (only one syscall), and it's 64-bit safe even on
2321 32-bit platforms (for instance, SPARC debugging a SPARC64
2322 application). */
2323 #ifdef HAVE_PREAD64
2324 if (pread64 (fd, myaddr, len, memaddr) != len)
2325 #else
2326 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
2327 #endif
2328 {
2329 close (fd);
2330 goto no_proc;
2331 }
2332
2333 close (fd);
2334 return 0;
2335 }
2336
2337 no_proc:
2338 /* Read all the longwords */
2339 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2340 {
2341 errno = 0;
2342 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
2343 if (errno)
2344 return errno;
2345 }
2346
2347 /* Copy appropriate bytes out of the buffer. */
2348 memcpy (myaddr,
2349 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
2350 len);
2351
2352 return 0;
2353 }
2354
2355 /* Copy LEN bytes of data from debugger memory at MYADDR
2356 to inferior's memory at MEMADDR.
2357 On failure (cannot write the inferior)
2358 returns the value of errno. */
2359
2360 static int
2361 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
2362 {
2363 register int i;
2364 /* Round starting address down to longword boundary. */
2365 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2366 /* Round ending address up; get number of longwords that makes. */
2367 register int count
2368 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
2369 /* Allocate buffer of that many longwords. */
2370 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2371 int pid = lwpid_of (get_thread_lwp (current_inferior));
2372
2373 if (debug_threads)
2374 {
2375 fprintf (stderr, "Writing %02x to %08lx\n", (unsigned)myaddr[0], (long)memaddr);
2376 }
2377
2378 /* Fill start and end extra bytes of buffer with existing memory data. */
2379
2380 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0);
2381
2382 if (count > 1)
2383 {
2384 buffer[count - 1]
2385 = ptrace (PTRACE_PEEKTEXT, pid,
2386 (PTRACE_ARG3_TYPE) (addr + (count - 1)
2387 * sizeof (PTRACE_XFER_TYPE)),
2388 0);
2389 }
2390
2391 /* Copy data to be written over corresponding part of buffer */
2392
2393 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
2394
2395 /* Write the entire buffer. */
2396
2397 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2398 {
2399 errno = 0;
2400 ptrace (PTRACE_POKETEXT, pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
2401 if (errno)
2402 return errno;
2403 }
2404
2405 return 0;
2406 }
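/* Editor's worked example for the word-alignment arithmetic shared by
   linux_read_memory and linux_write_memory: with 8-byte ptrace words, a
   transfer of 6 bytes at 0x1003 rounds the start address down to 0x1000,
   needs ((0x1003 + 6) - 0x1000 + 7) / 8 = 2 words, and the requested
   bytes sit at offset 0x1003 & 7 = 3 within the word buffer.  This is
   also why linux_write_memory peeks the first and last words before
   poking: only those two words can contain bytes outside
   [memaddr, memaddr + len) that must not be clobbered.  */
#if 0 /* Example only; not compiled.  */
static void
example_xfer_alignment (void)
{
  CORE_ADDR memaddr = 0x1003;
  int len = 6;
  CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  int count = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
              / sizeof (PTRACE_XFER_TYPE);
  int offset = memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);

  /* With 8-byte words: addr == 0x1000, count == 2, offset == 3.  */
  (void) addr; (void) count; (void) offset;
}
#endif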
2407
2408 static int linux_supports_tracefork_flag;
2409
2410 /* Helper functions for linux_test_for_tracefork, called via clone (). */
2411
2412 static int
2413 linux_tracefork_grandchild (void *arg)
2414 {
2415 _exit (0);
2416 }
2417
2418 #define STACK_SIZE 4096
2419
2420 static int
2421 linux_tracefork_child (void *arg)
2422 {
2423 ptrace (PTRACE_TRACEME, 0, 0, 0);
2424 kill (getpid (), SIGSTOP);
2425 #ifdef __ia64__
2426 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
2427 CLONE_VM | SIGCHLD, NULL);
2428 #else
2429 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
2430 CLONE_VM | SIGCHLD, NULL);
2431 #endif
2432 _exit (0);
2433 }
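/* Editor's note on the stack arithmetic above: clone expects the *top*
   of the new thread's stack on stack-grows-down targets, which is why
   linux_tracefork_child passes ARG + STACK_SIZE.  linux_test_for_tracefork
   below allocates STACK_SIZE * 4 bytes and carves them up: the first
   STACK_SIZE bytes back the child's stack, and the region beginning at
   STACK_SIZE * 2 (passed as ARG) backs the grandchild's stack, leaving
   slack between the two.  On ia64, __clone2 takes the stack base and
   size explicitly instead.  */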
2434
2435 /* Wrapper function for waitpid which handles EINTR, and emulates
2436 __WALL for systems where that is not available. */
2437
2438 static int
2439 my_waitpid (int pid, int *status, int flags)
2440 {
2441 int ret, out_errno;
2442
2443 if (debug_threads)
2444 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
2445
2446 if (flags & __WALL)
2447 {
2448 sigset_t block_mask, org_mask, wake_mask;
2449 int wnohang;
2450
2451 wnohang = (flags & WNOHANG) != 0;
2452 flags &= ~(__WALL | __WCLONE);
2453 flags |= WNOHANG;
2454
2455 /* Block all signals while here. This avoids knowing about
2456 LinuxThread's signals. */
2457 sigfillset (&block_mask);
2458 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
2459
2460 /* ... except during the sigsuspend below. */
2461 sigemptyset (&wake_mask);
2462
2463 while (1)
2464 {
2465 /* Since all signals are blocked, there's no need to check
2466 for EINTR here. */
2467 ret = waitpid (pid, status, flags);
2468 out_errno = errno;
2469
2470 if (ret == -1 && out_errno != ECHILD)
2471 break;
2472 else if (ret > 0)
2473 break;
2474
2475 if (flags & __WCLONE)
2476 {
2477 /* We've tried both flavors now. If WNOHANG is set,
2478 there's nothing else to do, just bail out. */
2479 if (wnohang)
2480 break;
2481
2482 if (debug_threads)
2483 fprintf (stderr, "blocking\n");
2484
2485 /* Block waiting for signals. */
2486 sigsuspend (&wake_mask);
2487 }
2488
2489 flags ^= __WCLONE;
2490 }
2491
2492 sigprocmask (SIG_SETMASK, &org_mask, NULL);
2493 }
2494 else
2495 {
2496 do
2497 ret = waitpid (pid, status, flags);
2498 while (ret == -1 && errno == EINTR);
2499 out_errno = errno;
2500 }
2501
2502 if (debug_threads)
2503 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
2504 pid, flags, status ? *status : -1, ret);
2505
2506 errno = out_errno;
2507 return ret;
2508 }
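/* Editor's note: when __WALL has to be emulated, the loop above polls
   with WNOHANG while toggling __WCLONE (flags ^= __WCLONE), so one pass
   checks ordinary children and the next checks clone children, and it
   only blocks in sigsuspend once both flavors have come up empty,
   waking on the next SIGCHLD (see sigchld_handler further down).  A
   minimal caller looks like this sketch:  */
#if 0 /* Example only; not compiled.  */
static void
example_wait_for_lwp (int lwpid)
{
  int status;
  int ret = my_waitpid (lwpid, &status, __WALL);

  if (ret == lwpid && WIFSTOPPED (status))
    {
      /* The LWP stopped; WSTOPSIG (status) says which signal.  */
    }
}
#endif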
2509
2510 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
2511 sure that we can enable the option, and that it had the desired
2512 effect. */
2513
2514 static void
2515 linux_test_for_tracefork (void)
2516 {
2517 int child_pid, ret, status;
2518 long second_pid;
2519 char *stack = xmalloc (STACK_SIZE * 4);
2520
2521 linux_supports_tracefork_flag = 0;
2522
2523 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
2524 #ifdef __ia64__
2525 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
2526 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
2527 #else
2528 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
2529 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
2530 #endif
2531 if (child_pid == -1)
2532 perror_with_name ("clone");
2533
2534 ret = my_waitpid (child_pid, &status, 0);
2535 if (ret == -1)
2536 perror_with_name ("waitpid");
2537 else if (ret != child_pid)
2538 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
2539 if (! WIFSTOPPED (status))
2540 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
2541
2542 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
2543 if (ret != 0)
2544 {
2545 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
2546 if (ret != 0)
2547 {
2548 warning ("linux_test_for_tracefork: failed to kill child");
2549 return;
2550 }
2551
2552 ret = my_waitpid (child_pid, &status, 0);
2553 if (ret != child_pid)
2554 warning ("linux_test_for_tracefork: failed to wait for killed child");
2555 else if (!WIFSIGNALED (status))
2556 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
2557 "killed child", status);
2558
2559 return;
2560 }
2561
2562 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
2563 if (ret != 0)
2564 warning ("linux_test_for_tracefork: failed to resume child");
2565
2566 ret = my_waitpid (child_pid, &status, 0);
2567
2568 if (ret == child_pid && WIFSTOPPED (status)
2569 && status >> 16 == PTRACE_EVENT_FORK)
2570 {
2571 second_pid = 0;
2572 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
2573 if (ret == 0 && second_pid != 0)
2574 {
2575 int second_status;
2576
2577 linux_supports_tracefork_flag = 1;
2578 my_waitpid (second_pid, &second_status, 0);
2579 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
2580 if (ret != 0)
2581 warning ("linux_test_for_tracefork: failed to kill second child");
2582 my_waitpid (second_pid, &status, 0);
2583 }
2584 }
2585 else
2586 warning ("linux_test_for_tracefork: unexpected result from waitpid "
2587 "(%d, status 0x%x)", ret, status);
2588
2589 do
2590 {
2591 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
2592 if (ret != 0)
2593 warning ("linux_test_for_tracefork: failed to kill child");
2594 my_waitpid (child_pid, &status, 0);
2595 }
2596 while (WIFSTOPPED (status));
2597
2598 free (stack);
2599 }
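/* Editor's note: for the PTRACE_O_TRACE* options, the kernel reports a
   traced event as a SIGTRAP stop with the event code packed into the
   high bits of the wait status, which is what the status >> 16 test
   above extracts.  A sketch that makes the decoding explicit:  */
#if 0 /* Example only; not compiled.  */
static int
example_ptrace_event_of_status (int status)
{
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    return status >> 16;	/* PTRACE_EVENT_FORK, _CLONE, etc.  */
  return 0;			/* Not an extended event stop.  */
}
#endif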
2600
2601
2602 static void
2603 linux_look_up_symbols (void)
2604 {
2605 #ifdef USE_THREAD_DB
2606 struct process_info *proc = current_process ();
2607
2608 if (proc->private->thread_db_active)
2609 return;
2610
2611 proc->private->thread_db_active
2612 = thread_db_init (!linux_supports_tracefork_flag);
2613 #endif
2614 }
2615
2616 static void
2617 linux_request_interrupt (void)
2618 {
2619 extern unsigned long signal_pid;
2620
2621 if (!ptid_equal (cont_thread, null_ptid)
2622 && !ptid_equal (cont_thread, minus_one_ptid))
2623 {
2624 struct lwp_info *lwp;
2625 int lwpid;
2626
2627 lwp = get_thread_lwp (current_inferior);
2628 lwpid = lwpid_of (lwp);
2629 kill_lwp (lwpid, SIGINT);
2630 }
2631 else
2632 kill_lwp (signal_pid, SIGINT);
2633 }
2634
2635 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
2636 to debugger memory starting at MYADDR. */
2637
2638 static int
2639 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
2640 {
2641 char filename[PATH_MAX];
2642 int fd, n;
2643 int pid = lwpid_of (get_thread_lwp (current_inferior));
2644
2645 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
2646
2647 fd = open (filename, O_RDONLY);
2648 if (fd < 0)
2649 return -1;
2650
2651 if (offset != (CORE_ADDR) 0
2652 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
2653 n = -1;
2654 else
2655 n = read (fd, myaddr, len);
2656
2657 close (fd);
2658
2659 return n;
2660 }
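/* Editor's note: the bytes returned by linux_read_auxv are the raw
   auxiliary vector: (type, value) pairs the size of the inferior's
   pointers, terminated by an AT_NULL (type 0) entry.  A sketch of a
   consumer on an LP64 host debugging a native inferior follows; the
   local struct mirrors the kernel layout and is an assumption, not a
   type defined in this file.  */
#if 0 /* Example only; not compiled.  */
static unsigned long
example_auxv_lookup (const unsigned char *auxv, int len, unsigned long type)
{
  struct { unsigned long a_type; unsigned long a_val; } entry;
  int offset;

  for (offset = 0; offset + (int) sizeof (entry) <= len;
       offset += sizeof (entry))
    {
      memcpy (&entry, auxv + offset, sizeof (entry));
      if (entry.a_type == 0)	/* AT_NULL: end of vector.  */
	break;
      if (entry.a_type == type)
	return entry.a_val;
    }
  return 0;
}
#endif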
2661
2662 /* These watchpoint-related wrapper functions simply forward the call
2663 if the target has registered a corresponding function. */
2664
2665 static int
2666 linux_insert_watchpoint (char type, CORE_ADDR addr, int len)
2667 {
2668 if (the_low_target.insert_watchpoint != NULL)
2669 return the_low_target.insert_watchpoint (type, addr, len);
2670 else
2671 /* Unsupported (see target.h). */
2672 return 1;
2673 }
2674
2675 static int
2676 linux_remove_watchpoint (char type, CORE_ADDR addr, int len)
2677 {
2678 if (the_low_target.remove_watchpoint != NULL)
2679 return the_low_target.remove_watchpoint (type, addr, len);
2680 else
2681 /* Unsupported (see target.h). */
2682 return 1;
2683 }
2684
2685 static int
2686 linux_stopped_by_watchpoint (void)
2687 {
2688 if (the_low_target.stopped_by_watchpoint != NULL)
2689 return the_low_target.stopped_by_watchpoint ();
2690 else
2691 return 0;
2692 }
2693
2694 static CORE_ADDR
2695 linux_stopped_data_address (void)
2696 {
2697 if (the_low_target.stopped_data_address != NULL)
2698 return the_low_target.stopped_data_address ();
2699 else
2700 return 0;
2701 }
2702
2703 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2704 #if defined(__mcoldfire__)
2705 /* These should really be defined in the kernel's ptrace.h header. */
2706 #define PT_TEXT_ADDR 49*4
2707 #define PT_DATA_ADDR 50*4
2708 #define PT_TEXT_END_ADDR 51*4
2709 #endif
2710
2711 /* Under uClinux, programs are loaded at non-zero offsets, which we need
2712 to tell gdb about. */
2713
2714 static int
2715 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
2716 {
2717 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
2718 unsigned long text, text_end, data;
2719 int pid = lwpid_of (get_thread_lwp (current_inferior));
2720
2721 errno = 0;
2722
2723 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
2724 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
2725 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
2726
2727 if (errno == 0)
2728 {
2729 /* Both text and data offsets produced at compile-time (and so
2730 used by gdb) are relative to the beginning of the program,
2731 with the data segment immediately following the text segment.
2732 However, the actual runtime layout in memory may put the data
2733 somewhere else, so when we send gdb a data base-address, we
2734 use the real data base address and subtract the compile-time
2735 data base-address from it (which is just the length of the
2736 text segment). BSS immediately follows data in both
2737 cases. */
2738 *text_p = text;
2739 *data_p = data - (text_end - text);
2740
2741 return 1;
2742 }
2743 #endif
2744 return 0;
2745 }
2746 #endif
2747
2748 static int
2749 linux_qxfer_osdata (const char *annex,
2750 unsigned char *readbuf, unsigned const char *writebuf,
2751 CORE_ADDR offset, int len)
2752 {
2753 /* We take a snapshot of the process list when the object starts to
2754 be read. */
2755 static const char *buf;
2756 static long len_avail = -1;
2757 static struct buffer buffer;
2758
2759 DIR *dirp;
2760
2761 if (strcmp (annex, "processes") != 0)
2762 return 0;
2763
2764 if (!readbuf || writebuf)
2765 return 0;
2766
2767 if (offset == 0)
2768 {
2769 if (len_avail != -1 && len_avail != 0)
2770 buffer_free (&buffer);
2771 len_avail = 0;
2772 buf = NULL;
2773 buffer_init (&buffer);
2774 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
2775
2776 dirp = opendir ("/proc");
2777 if (dirp)
2778 {
2779 struct dirent *dp;
2780 while ((dp = readdir (dirp)) != NULL)
2781 {
2782 struct stat statbuf;
2783 char procentry[sizeof ("/proc/4294967295")];
2784
2785 if (!isdigit (dp->d_name[0])
2786 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
2787 continue;
2788
2789 sprintf (procentry, "/proc/%s", dp->d_name);
2790 if (stat (procentry, &statbuf) == 0
2791 && S_ISDIR (statbuf.st_mode))
2792 {
2793 char pathname[128];
2794 FILE *f;
2795 char cmd[MAXPATHLEN + 1];
2796 struct passwd *entry;
2797
2798 sprintf (pathname, "/proc/%s/cmdline", dp->d_name);
2799 entry = getpwuid (statbuf.st_uid);
2800
2801 if ((f = fopen (pathname, "r")) != NULL)
2802 {
2803 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
2804 if (len > 0)
2805 {
2806 int i;
2807 for (i = 0; i < len; i++)
2808 if (cmd[i] == '\0')
2809 cmd[i] = ' ';
2810 cmd[len] = '\0';
2811
2812 buffer_xml_printf (
2813 &buffer,
2814 "<item>"
2815 "<column name=\"pid\">%s</column>"
2816 "<column name=\"user\">%s</column>"
2817 "<column name=\"command\">%s</column>"
2818 "</item>",
2819 dp->d_name,
2820 entry ? entry->pw_name : "?",
2821 cmd);
2822 }
2823 fclose (f);
2824 }
2825 }
2826 }
2827
2828 closedir (dirp);
2829 }
2830 buffer_grow_str0 (&buffer, "</osdata>\n");
2831 buf = buffer_finish (&buffer);
2832 len_avail = strlen (buf);
2833 }
2834
2835 if (offset >= len_avail)
2836 {
2837 /* Done. Get rid of the data. */
2838 buffer_free (&buffer);
2839 buf = NULL;
2840 len_avail = 0;
2841 return 0;
2842 }
2843
2844 if (len > len_avail - offset)
2845 len = len_avail - offset;
2846 memcpy (readbuf, buf + offset, len);
2847
2848 return len;
2849 }
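/* Editor's note: qXfer objects such as the one above are transferred in
   chunks.  The caller keeps asking for the next OFFSET until the
   function returns 0, which is also when the snapshot built on the
   first (offset == 0) call is released.  A hypothetical consumer (the
   buffer size is arbitrary):  */
#if 0 /* Example only; not compiled.  */
static void
example_read_osdata_processes (void)
{
  unsigned char chunk[4096];
  CORE_ADDR offset = 0;
  int n;

  while ((n = linux_qxfer_osdata ("processes", chunk, NULL,
				  offset, sizeof (chunk))) > 0)
    {
      /* CHUNK holds N more bytes of the <osdata type="processes"> XML
	 document, starting at OFFSET.  */
      offset += n;
    }
}
#endif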
2850
2851 /* Convert a native/host siginfo object into/from the siginfo in the
2852 layout of the inferior's architecture. */
2853
2854 static void
2855 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
2856 {
2857 int done = 0;
2858
2859 if (the_low_target.siginfo_fixup != NULL)
2860 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
2861
2862 /* If there was no callback, or the callback didn't do anything,
2863 then just do a straight memcpy. */
2864 if (!done)
2865 {
2866 if (direction == 1)
2867 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
2868 else
2869 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
2870 }
2871 }
2872
2873 static int
2874 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
2875 unsigned const char *writebuf, CORE_ADDR offset, int len)
2876 {
2877 int pid;
2878 struct siginfo siginfo;
2879 char inf_siginfo[sizeof (struct siginfo)];
2880
2881 if (current_inferior == NULL)
2882 return -1;
2883
2884 pid = lwpid_of (get_thread_lwp (current_inferior));
2885
2886 if (debug_threads)
2887 fprintf (stderr, "%s siginfo for lwp %d.\n",
2888 readbuf != NULL ? "Reading" : "Writing",
2889 pid);
2890
2891 if (offset > sizeof (siginfo))
2892 return -1;
2893
2894 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
2895 return -1;
2896
2897 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
2898 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
2899 inferior with a 64-bit GDBSERVER should look the same as debugging it
2900 with a 32-bit GDBSERVER, we need to convert it. */
2901 siginfo_fixup (&siginfo, inf_siginfo, 0);
2902
2903 if (offset + len > sizeof (siginfo))
2904 len = sizeof (siginfo) - offset;
2905
2906 if (readbuf != NULL)
2907 memcpy (readbuf, inf_siginfo + offset, len);
2908 else
2909 {
2910 memcpy (inf_siginfo + offset, writebuf, len);
2911
2912 /* Convert back to ptrace layout before flushing it out. */
2913 siginfo_fixup (&siginfo, inf_siginfo, 1);
2914
2915 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
2916 return -1;
2917 }
2918
2919 return len;
2920 }
2921
2922 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
2923 it lets us notice when children change state; and it acts as the
2924 handler for the sigsuspend in my_waitpid. */
2925
2926 static void
2927 sigchld_handler (int signo)
2928 {
2929 int old_errno = errno;
2930
2931 if (debug_threads)
2932 /* fprintf is not async-signal-safe, so call write directly. */
2933 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
2934
2935 if (target_is_async_p ())
2936 async_file_mark (); /* trigger a linux_wait */
2937
2938 errno = old_errno;
2939 }
2940
2941 static int
2942 linux_supports_non_stop (void)
2943 {
2944 return 1;
2945 }
2946
2947 static int
2948 linux_async (int enable)
2949 {
2950 int previous = (linux_event_pipe[0] != -1);
2951
2952 if (previous != enable)
2953 {
2954 sigset_t mask;
2955 sigemptyset (&mask);
2956 sigaddset (&mask, SIGCHLD);
2957
2958 sigprocmask (SIG_BLOCK, &mask, NULL);
2959
2960 if (enable)
2961 {
2962 if (pipe (linux_event_pipe) == -1)
2963 fatal ("creating event pipe failed.");
2964
2965 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
2966 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
2967
2968 /* Register the event loop handler. */
2969 add_file_handler (linux_event_pipe[0],
2970 handle_target_event, NULL);
2971
2972 /* Always trigger a linux_wait. */
2973 async_file_mark ();
2974 }
2975 else
2976 {
2977 delete_file_handler (linux_event_pipe[0]);
2978
2979 close (linux_event_pipe[0]);
2980 close (linux_event_pipe[1]);
2981 linux_event_pipe[0] = -1;
2982 linux_event_pipe[1] = -1;
2983 }
2984
2985 sigprocmask (SIG_UNBLOCK, &mask, NULL);
2986 }
2987
2988 return previous;
2989 }
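/* Editor's note: the event pipe set up above is the classic self-pipe
   trick.  async_file_mark (used by sigchld_handler above) makes the
   read end of the pipe readable, the event loop sees that and calls
   handle_target_event, and that in turn ends up calling linux_wait.  A
   schematic version of the two halves, with placeholder names -- this
   is a sketch of the pattern, not the actual implementation:  */
#if 0 /* Example only; not compiled.  */
static void
example_mark_event (void)
{
  /* The pipe is O_NONBLOCK; if it is already full, the event is
     effectively marked already, so a failed write can be ignored.  */
  write (linux_event_pipe[1], "+", 1);
}

static void
example_drain_events (void)
{
  char buf[64];

  /* Empty the pipe; the actual events are then discovered by calling
     linux_wait.  */
  while (read (linux_event_pipe[0], buf, sizeof (buf)) > 0)
    ;
}
#endif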
2990
2991 static int
2992 linux_start_non_stop (int nonstop)
2993 {
2994 /* Register or unregister from event-loop accordingly. */
2995 linux_async (nonstop);
2996 return 0;
2997 }
2998
2999 static struct target_ops linux_target_ops = {
3000 linux_create_inferior,
3001 linux_attach,
3002 linux_kill,
3003 linux_detach,
3004 linux_join,
3005 linux_thread_alive,
3006 linux_resume,
3007 linux_wait,
3008 linux_fetch_registers,
3009 linux_store_registers,
3010 linux_read_memory,
3011 linux_write_memory,
3012 linux_look_up_symbols,
3013 linux_request_interrupt,
3014 linux_read_auxv,
3015 linux_insert_watchpoint,
3016 linux_remove_watchpoint,
3017 linux_stopped_by_watchpoint,
3018 linux_stopped_data_address,
3019 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3020 linux_read_offsets,
3021 #else
3022 NULL,
3023 #endif
3024 #ifdef USE_THREAD_DB
3025 thread_db_get_tls_address,
3026 #else
3027 NULL,
3028 #endif
3029 NULL,
3030 hostio_last_error_from_errno,
3031 linux_qxfer_osdata,
3032 linux_xfer_siginfo,
3033 linux_supports_non_stop,
3034 linux_async,
3035 linux_start_non_stop,
3036 };
3037
3038 static void
3039 linux_init_signals ()
3040 {
3041 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
3042 to find what the cancel signal actually is. */
3043 signal (__SIGRTMIN+1, SIG_IGN);
3044 }
3045
3046 void
3047 initialize_low (void)
3048 {
3049 struct sigaction sigchld_action;
3050 memset (&sigchld_action, 0, sizeof (sigchld_action));
3051 set_target_ops (&linux_target_ops);
3052 set_breakpoint_data (the_low_target.breakpoint,
3053 the_low_target.breakpoint_len);
3054 linux_init_signals ();
3055 linux_test_for_tracefork ();
3056 #ifdef HAVE_LINUX_REGSETS
3057 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
3058 ;
3059 disabled_regsets = xmalloc (num_regsets);
3060 #endif
3061
3062 sigchld_action.sa_handler = sigchld_handler;
3063 sigemptyset (&sigchld_action.sa_mask);
3064 sigchld_action.sa_flags = SA_RESTART;
3065 sigaction (SIGCHLD, &sigchld_action, NULL);
3066 }