* linux-low.c (linux_kill_one_lwp): Assume the lwp is stopped.
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifndef PTRACE_GETSIGINFO
# define PTRACE_GETSIGINFO 0x4202
# define PTRACE_SETSIGINFO 0x4203
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201

/* Options set using PTRACE_SETOPTIONS.  */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6

#endif /* PTRACE_EVENT_FORK */

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   in some contexts.  */
#ifndef __WALL
#define __WALL 0x40000000 /* Wait for any child.  */
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
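
/* Editor's note: an illustrative sketch (not part of the original
   file) of how the W_STOPCODE fallback above composes the same
   encoding that the standard WIFSTOPPED/WSTOPSIG macros take apart.
   Kept under #if 0 so it does not affect the build.  */
#if 0
static void
w_stopcode_example (void)
{
  /* (SIGSTOP << 8) | 0x7f, i.e. 0x137f on x86 Linux, where SIGSTOP
     is 19.  */
  int status = W_STOPCODE (SIGSTOP);

  gdb_assert (WIFSTOPPED (status));
  gdb_assert (WSTOPSIG (status) == SIGSTOP);
}
#endif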

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (void);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int linux_core_of_thread (ptid_t ptid);
static void proceed_all_lwps (void);
static void unstop_all_lwps (struct lwp_info *except);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Accepts an integer PID; returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}
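
/* Editor's usage sketch (hypothetical caller, not part of the
   original file): the function above returns a malloc'd string either
   way - the exe symlink target on success, the /proc path itself on
   failure - so the caller always frees exactly one buffer.  */
#if 0
static void
exec_file_example (int pid)
{
  char *path = linux_child_pid_to_exec_file (pid);

  fprintf (stderr, "inferior %d is running %s\n", pid, path);
  free (path);	/* Caller owns the result.  */
}
#endif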

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}
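
/* Editor's usage sketch (hypothetical, not part of the original
   file): combining the two helpers above to report the word size of
   a live inferior from its /proc/PID/exe link.  */
#if 0
static void
report_elf_class_example (int pid)
{
  char *file = linux_child_pid_to_exec_file (pid);
  int is_64 = elf_64_file_p (file);

  if (is_64 < 0)
    fprintf (stderr, "%s: not accessible\n", file);
  else
    fprintf (stderr, "%s: %d-bit ELF\n", file, is_64 ? 64 : 32);
  free (file);
}
#endif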

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
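
/* Editor's note: the loop above emulates __WALL by polling the plain
   and __WCLONE wait sets alternately with WNOHANG, sleeping in
   sigsuspend between rounds so a SIGCHLD wakes it rather than
   spinning.  A hypothetical caller (not part of the original file)
   just passes __WALL as if the kernel honored it directly:  */
#if 0
static void
my_waitpid_example (int pid)
{
  int status;
  int ret = my_waitpid (pid, &status, __WALL);

  if (ret > 0 && WIFSTOPPED (status))
    fprintf (stderr, "LWP %d stopped with signal %d\n",
             ret, WSTOPSIG (status));
}
#endif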

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
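
/* Editor's illustrative sketch (not part of the original file): a
   ptrace event stop like the clone handled above reports
   WSTOPSIG == SIGTRAP and stashes the PTRACE_EVENT_* number in bits
   16 and up of the wait status, which is exactly what the
   `wstat >> 16' test extracts.  */
#if 0
static int
is_clone_event_example (int wstat)
{
  return (WIFSTOPPED (wstat)
          && WSTOPSIG (wstat) == SIGTRAP
          && (wstat >> 16) == PTRACE_EVENT_CLONE);
}
#endif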

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
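
/* Editor's illustrative sketch (hypothetical values, not part of the
   original file): on i386, decr_pc_after_break is 1 because the int3
   trap leaves %eip one byte past the 0xcc that was planted, so the
   subtraction in get_stop_pc recovers the breakpoint address.  */
#if 0
static void
decr_pc_example (void)
{
  CORE_ADDR bp_addr = 0x8048000;	/* where the 0xcc byte sits */
  CORE_ADDR reported_pc = bp_addr + 1;	/* PC after the trap */

  gdb_assert (reported_pc - 1 /* decr_pc_after_break */ == bp_addr);
}
#endif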

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
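
/* Editor's minimal sketch of the same creation handshake in
   isolation (not part of the original file): the child volunteers
   for tracing before exec, so the kernel stops it with SIGTRAP at
   the first instruction of the new image, and the parent collects
   that initial stop with waitpid.  */
#if 0
static int
traceme_example (char *program, char **argv)
{
  int status;
  int pid = fork ();

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      execv (program, argv);
      _exit (0177);		/* Only reached if exec failed.  */
    }

  waitpid (pid, &status, 0);	/* The SIGTRAP from the exec.  */
  return pid;
}
#endif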

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }
      else
        /* If we fail to attach to a process, report an error.  */
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
               strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might not have been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.
        ??? If the process already has several threads we leave the other
        threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things apparently work
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

int
linux_attach (unsigned long pid)
{
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}
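
/* Editor's usage sketch (hypothetical, not part of the original
   file): find_inferior callbacks return nonzero to stop the walk, so
   last_thread_of_process_p above reads as "no second thread of this
   pid was found".  */
#if 0
static void
last_thread_example (struct thread_info *thread)
{
  if (last_thread_of_process_p (thread))
    fprintf (stderr, "this is the last thread of its process\n");
}
#endif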

/* Kill the inferior lwp.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps ();

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
             lwpid_of (lwp), pid);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (NULL);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps ();

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_STOPPED)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (requested_child != NULL)
            {
              int lwpid = lwpid_of (event_child);

              /* Cancel the step-over operation --- the thread that
                 started it is gone.  */
              if (finish_step_over (event_child))
                unstop_all_lwps (event_child);
              delete_lwp (event_child);
              return lwpid;
            }

          delete_lwp (event_child);

          /* Wait for a more interesting event.  */
          continue;
        }

      if (event_child->must_set_ptrace_flags)
        {
          linux_enable_event_reporting (lwpid_of (event_child));
          event_child->must_set_ptrace_flags = 0;
        }

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
          && *wstat >> 16 != 0)
        {
          handle_extended_wait (event_child, *wstat);
          continue;
        }

      /* If GDB is not interested in this signal, don't stop other
         threads, and don't report it to GDB.  Just resume the
         inferior right away.  We do this for threading-related
         signals as well as any that GDB specifically requested we
         ignore.  But never ignore SIGSTOP if we sent it ourselves,
         and do not ignore signals when stepping - they may require
         special handling to skip the signal handler.  */
      /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
         thread library?  */
      if (WIFSTOPPED (*wstat)
          && !event_child->stepping
          && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
              (current_process ()->private->thread_db != NULL
               && (WSTOPSIG (*wstat) == __SIGRTMIN
                   || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
              ||
#endif
              (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
               && !(WSTOPSIG (*wstat) == SIGSTOP
                    && event_child->stop_expected))))
        {
          siginfo_t info, *info_p;

          if (debug_threads)
            fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
                     WSTOPSIG (*wstat), lwpid_of (event_child));

          if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
            info_p = &info;
          else
            info_p = NULL;
          linux_resume_one_lwp (event_child, event_child->stepping,
                                WSTOPSIG (*wstat), info_p);
          continue;
        }

      if (WIFSTOPPED (*wstat)
          && WSTOPSIG (*wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          int should_stop;

          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;

          should_stop = (current_inferior->last_resume_kind == resume_stop
                         || stopping_threads);

          if (!should_stop)
            {
              linux_resume_one_lwp (event_child,
                                    event_child->stepping, 0, NULL);
              continue;
            }
        }

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  ptid_t wait_ptid;

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  while (1)
    {
      int event_pid;

      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);

      if (event_pid > 0
          && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
        {
          struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));

          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
        }
      else
        return event_pid;
    }
}


/* Count the LWP's that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);

  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_threads)
        fprintf (stderr,
                 "SEL: Select single-step %s\n",
                 target_pid_to_str (ptid_of (event_lp)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_lwps, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
        ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
        fprintf (stderr,
                 "SEL: Found %d SIGTRAP events, selecting #%d\n",
                 num_events, random_selector);

      event_lp = (struct lwp_info *) find_inferior (&all_lwps,
                                                    select_event_lwp_callback,
                                                    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
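
/* Editor's note in code form (not part of the original file): the
   scaling used above maps rand ()'s [0, RAND_MAX] range onto
   [0, num_events) roughly uniformly, which is what keeps one busy
   LWP from starving the others of event reports.  */
#if 0
static int
random_selector_example (int num_events)
{
  /* Same formula as select_event_lwp: yields 0 .. num_events - 1.  */
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif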
1572
1573 /* Set this inferior LWP's state as "want-stopped". We won't resume
1574 this LWP until the client gives us another action for it. */
1575
1576 static void
1577 gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
1578 {
1579 struct lwp_info *lwp = (struct lwp_info *) entry;
1580 struct thread_info *thread = get_lwp_thread (lwp);
1581
1582 /* Most threads are stopped implicitly (all-stop); tag that with
1583 signal 0. The thread being explicitly reported stopped to the
1584 client, gets it's status fixed up afterwards. */
1585 thread->last_status.kind = TARGET_WAITKIND_STOPPED;
1586 thread->last_status.value.sig = TARGET_SIGNAL_0;
1587
1588 thread->last_resume_kind = resume_stop;
1589 }
1590
1591 /* Set all LWP's states as "want-stopped". */
1592
1593 static void
1594 gdb_wants_all_stopped (void)
1595 {
1596 for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
1597 }
1598
1599 /* Wait for process, returns status. */
1600
1601 static ptid_t
1602 linux_wait_1 (ptid_t ptid,
1603 struct target_waitstatus *ourstatus, int target_options)
1604 {
1605 int w;
1606 struct lwp_info *event_child;
1607 int options;
1608 int pid;
1609 int step_over_finished;
1610 int bp_explains_trap;
1611 int maybe_internal_trap;
1612 int report_to_gdb;
1613 int trace_event;
1614
1615 /* Translate generic target options into linux options. */
1616 options = __WALL;
1617 if (target_options & TARGET_WNOHANG)
1618 options |= WNOHANG;
1619
1620 retry:
1621 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1622
1623 /* If we were only supposed to resume one thread, only wait for
1624 that thread - if it's still alive. If it died, however - which
1625 can happen if we're coming from the thread death case below -
1626 then we need to make sure we restart the other threads. We could
1627 pick a thread at random or restart all; restarting all is less
1628 arbitrary. */
1629 if (!non_stop
1630 && !ptid_equal (cont_thread, null_ptid)
1631 && !ptid_equal (cont_thread, minus_one_ptid))
1632 {
1633 struct thread_info *thread;
1634
1635 thread = (struct thread_info *) find_inferior_id (&all_threads,
1636 cont_thread);
1637
1638 /* No stepping, no signal - unless one is pending already, of course. */
1639 if (thread == NULL)
1640 {
1641 struct thread_resume resume_info;
1642 resume_info.thread = minus_one_ptid;
1643 resume_info.kind = resume_continue;
1644 resume_info.sig = 0;
1645 linux_resume (&resume_info, 1);
1646 }
1647 else
1648 ptid = cont_thread;
1649 }
1650
1651 if (ptid_equal (step_over_bkpt, null_ptid))
1652 pid = linux_wait_for_event (ptid, &w, options);
1653 else
1654 {
1655 if (debug_threads)
1656 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
1657 target_pid_to_str (step_over_bkpt));
1658 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
1659 }
1660
1661 if (pid == 0) /* only if TARGET_WNOHANG */
1662 return null_ptid;
1663
1664 event_child = get_thread_lwp (current_inferior);
1665
1666 /* If we are waiting for a particular child, and it exited,
1667 linux_wait_for_event will return its exit status. Similarly if
1668 the last child exited. If this is not the last child, however,
1669 do not report it as exited until there is a 'thread exited' response
1670 available in the remote protocol. Instead, just wait for another event.
1671 This should be safe, because if the thread crashed we will already
1672 have reported the termination signal to GDB; that should stop any
1673 in-progress stepping operations, etc.
1674
1675 Report the exit status of the last thread to exit. This matches
1676 LinuxThreads' behavior. */
1677
1678 if (last_thread_of_process_p (current_inferior))
1679 {
1680 if (WIFEXITED (w) || WIFSIGNALED (w))
1681 {
1682 if (WIFEXITED (w))
1683 {
1684 ourstatus->kind = TARGET_WAITKIND_EXITED;
1685 ourstatus->value.integer = WEXITSTATUS (w);
1686
1687 if (debug_threads)
1688 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
1689 }
1690 else
1691 {
1692 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
1693 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
1694
1695 if (debug_threads)
1696 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
1697
1698 }
1699
1700 return pid_to_ptid (pid);
1701 }
1702 }
1703 else
1704 {
1705 if (!WIFSTOPPED (w))
1706 goto retry;
1707 }
1708
1709 /* If this event was not handled before, and is not a SIGTRAP, we
1710 report it. SIGILL and SIGSEGV are also treated as traps in case
1711 a breakpoint is inserted at the current PC. If this target does
1712 not support internal breakpoints at all, we also report the
1713 SIGTRAP without further processing; it's of no concern to us. */
1714 maybe_internal_trap
1715 = (supports_breakpoints ()
1716 && (WSTOPSIG (w) == SIGTRAP
1717 || ((WSTOPSIG (w) == SIGILL
1718 || WSTOPSIG (w) == SIGSEGV)
1719 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
1720
1721 if (maybe_internal_trap)
1722 {
1723 /* Handle anything that requires bookkeeping before deciding to
1724 report the event or continue waiting. */
1725
1726 /* First check if we can explain the SIGTRAP with an internal
1727 breakpoint, or if we should possibly report the event to GDB.
1728 Do this before anything that may remove or insert a
1729 breakpoint. */
1730 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
1731
1732 /* We have a SIGTRAP, possibly a step-over dance has just
1733 finished. If so, tweak the state machine accordingly,
1734 reinsert breakpoints and delete any reinsert (software
1735 single-step) breakpoints. */
1736 step_over_finished = finish_step_over (event_child);
1737
1738 /* Now invoke the callbacks of any internal breakpoints there. */
1739 check_breakpoints (event_child->stop_pc);
1740
1741 /* Handle tracepoint data collecting. This may overflow the
1742 trace buffer, and cause a tracing stop, removing
1743 breakpoints. */
1744 trace_event = handle_tracepoints (event_child);
1745
1746 if (bp_explains_trap)
1747 {
1748 /* If we stepped or ran into an internal breakpoint, we've
1749 already handled it. So next time we resume (from this
1750 PC), we should step over it. */
1751 if (debug_threads)
1752 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
1753
1754 if (breakpoint_here (event_child->stop_pc))
1755 event_child->need_step_over = 1;
1756 }
1757 }
1758 else
1759 {
1760 /* We have some other signal, possibly a step-over dance was in
1761 progress, and it should be cancelled too. */
1762 step_over_finished = finish_step_over (event_child);
1763
1764 trace_event = 0;
1765 }
1766
1767 /* We have all the data we need. Either report the event to GDB, or
1768 resume threads and keep waiting for more. */
1769
1770 /* Check If GDB would be interested in this event. If GDB wanted
1771 this thread to single step, we always want to report the SIGTRAP,
1772 and let GDB handle it. Watchpoints should always be reported.
1773 So should signals we can't explain. A SIGTRAP we can't explain
1774 could be a GDB breakpoint --- we may or not support Z0
1775 breakpoints. If we do, we're be able to handle GDB breakpoints
1776 on top of internal breakpoints, by handling the internal
1777 breakpoint and still reporting the event to GDB. If we don't,
1778 we're out of luck, GDB won't see the breakpoint hit. */
1779 report_to_gdb = (!maybe_internal_trap
1780 || current_inferior->last_resume_kind == resume_step
1781 || event_child->stopped_by_watchpoint
1782 || (!step_over_finished && !bp_explains_trap && !trace_event)
1783 || gdb_breakpoint_here (event_child->stop_pc));
1784
1785 /* We found no reason GDB would want us to stop. We either hit one
1786 of our own breakpoints, or finished an internal step GDB
1787 shouldn't know about. */
1788 if (!report_to_gdb)
1789 {
1790 if (debug_threads)
1791 {
1792 if (bp_explains_trap)
1793 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
1794 if (step_over_finished)
1795 fprintf (stderr, "Step-over finished.\n");
1796 if (trace_event)
1797 fprintf (stderr, "Tracepoint event.\n");
1798 }
1799
1800 /* We're not reporting this breakpoint to GDB, so apply the
1801 decr_pc_after_break adjustment to the inferior's regcache
1802 ourselves. */
1803
1804 if (the_low_target.set_pc != NULL)
1805 {
1806 struct regcache *regcache
1807 = get_thread_regcache (get_lwp_thread (event_child), 1);
1808 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
1809 }
1810
1811 /* We've finished stepping over a breakpoint. We've stopped all
1812 LWPs momentarily except the stepping one. This is where we
1813 resume them all again. We're going to keep waiting, so use
1814 proceed, which handles stepping over the next breakpoint. */
1815 if (debug_threads)
1816 fprintf (stderr, "proceeding all threads.\n");
1817 proceed_all_lwps ();
1818 goto retry;
1819 }
1820
1821 if (debug_threads)
1822 {
1823 if (current_inferior->last_resume_kind == resume_step)
1824 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
1825 if (event_child->stopped_by_watchpoint)
1826 fprintf (stderr, "Stopped by watchpoint.\n");
1827 if (gdb_breakpoint_here (event_child->stop_pc))
1828 fprintf (stderr, "Stopped by GDB breakpoint.\n");
1829 if (debug_threads)
1830 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
1831 }
1832
1833 /* Alright, we're going to report a stop. */
1834
1835 if (!non_stop)
1836 {
1837 /* In all-stop, stop all threads. */
1838 stop_all_lwps ();
1839
1840 /* If we're not waiting for a specific LWP, choose an event LWP
1841 from among those that have had events. Giving equal priority
1842 to all LWPs that have had events helps prevent
1843 starvation. */
1844 if (ptid_equal (ptid, minus_one_ptid))
1845 {
1846 event_child->status_pending_p = 1;
1847 event_child->status_pending = w;
1848
1849 select_event_lwp (&event_child);
1850
1851 event_child->status_pending_p = 0;
1852 w = event_child->status_pending;
1853 }
1854
1855 /* Now that we've selected our final event LWP, cancel any
1856 breakpoints in other LWPs that have hit a GDB breakpoint.
1857 See the comment in cancel_breakpoints_callback to find out
1858 why. */
1859 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
1860 }
1861 else
1862 {
1863 /* If we just finished a step-over, then all threads had been
1864 momentarily paused. In all-stop, that's fine, we want
1865 threads stopped by now anyway. In non-stop, we need to
1866 re-resume threads that GDB wanted to be running. */
1867 if (step_over_finished)
1868 unstop_all_lwps (event_child);
1869 }
1870
1871 ourstatus->kind = TARGET_WAITKIND_STOPPED;
1872
1873 /* Do this before the gdb_wants_all_stopped calls below, since they
1874 always set last_resume_kind to resume_stop. */
1875 if (current_inferior->last_resume_kind == resume_stop
1876 && WSTOPSIG (w) == SIGSTOP)
1877 {
1878 /* This thread was requested to stop by GDB with vCont;t, and it
1879 stopped cleanly, so report it as stopped with SIG0. The use
1880 of SIGSTOP is an implementation detail. */
1881 ourstatus->value.sig = TARGET_SIGNAL_0;
1882 }
1883 else if (current_inferior->last_resume_kind == resume_stop
1884 && WSTOPSIG (w) != SIGSTOP)
1885 {
1886 /* This thread was requested to stop by GDB with vCont;t, but it
1887 stopped for some other reason. */
1888 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1889 }
1890 else
1891 {
1892 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1893 }
1894
1895 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
1896
1897 if (!non_stop)
1898 {
1899 /* From GDB's perspective, all-stop mode always stops all
1900 threads implicitly. Tag all threads as "want-stopped". */
1901 gdb_wants_all_stopped ();
1902 }
1903 else
1904 {
1905 /* We're reporting this LWP as stopped. Update its
1906 "want-stopped" state to what the client wants, until it gets
1907 a new resume action. */
1908 gdb_wants_lwp_stopped (&event_child->head);
1909 }
1910
1911 if (debug_threads)
1912 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
1913 target_pid_to_str (ptid_of (event_child)),
1914 ourstatus->kind,
1915 ourstatus->value.sig);
1916
1917 get_lwp_thread (event_child)->last_status = *ourstatus;
1918 return ptid_of (event_child);
1919 }
1920
1921 /* Get rid of any pending event in the pipe. */
1922 static void
1923 async_file_flush (void)
1924 {
1925 int ret;
1926 char buf;
1927
1928 do
1929 ret = read (linux_event_pipe[0], &buf, 1);
1930 while (ret >= 0 || (ret == -1 && errno == EINTR));
1931 }
1932
1933 /* Put something in the pipe, so the event loop wakes up. */
1934 static void
1935 async_file_mark (void)
1936 {
1937 int ret;
1938
1939 async_file_flush ();
1940
1941 do
1942 ret = write (linux_event_pipe[1], "+", 1);
1943 while (ret == 0 || (ret == -1 && errno == EINTR));
1944
1945 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1946 be awakened anyway. */
1947 }
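/* For illustration, the consumer side: the event loop is expected to
   select/poll on linux_event_pipe[0] and call linux_wait when it
   becomes readable. A minimal sketch, assuming a poll-based loop and
   <poll.h> (the actual event-loop code lives elsewhere):

     struct pollfd pfd = { linux_event_pipe[0], POLLIN, 0 };
     if (poll (&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
       ...  // one or more stops may be pending; fetch them via linux_wait

   The pipe only wakes the loop; the actual events come from waitpid. */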
1948
1949 static ptid_t
1950 linux_wait (ptid_t ptid,
1951 struct target_waitstatus *ourstatus, int target_options)
1952 {
1953 ptid_t event_ptid;
1954
1955 if (debug_threads)
1956 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1957
1958 /* Flush the async file first. */
1959 if (target_is_async_p ())
1960 async_file_flush ();
1961
1962 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1963
1964 /* If at least one stop was reported, there may be more. A single
1965 SIGCHLD can signal more than one child stop. */
1966 if (target_is_async_p ()
1967 && (target_options & TARGET_WNOHANG) != 0
1968 && !ptid_equal (event_ptid, null_ptid))
1969 async_file_mark ();
1970
1971 return event_ptid;
1972 }
1973
1974 /* Send a signal to an LWP. */
1975
1976 static int
1977 kill_lwp (unsigned long lwpid, int signo)
1978 {
1979 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1980 fails, then we are not using nptl threads and we should be using kill. */
1981
1982 #ifdef __NR_tkill
1983 {
1984 static int tkill_failed;
1985
1986 if (!tkill_failed)
1987 {
1988 int ret;
1989
1990 errno = 0;
1991 ret = syscall (__NR_tkill, lwpid, signo);
1992 if (errno != ENOSYS)
1993 return ret;
1994 tkill_failed = 1;
1995 }
1996 }
1997 #endif
1998
1999 return kill (lwpid, signo);
2000 }
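/* Side note (illustrative): newer kernels also provide tgkill, which
   additionally checks the thread-group id and so avoids tkill's
   theoretical pid-reuse race. A sketch, assuming the caller also knows
   the containing process's tgid:

     syscall (__NR_tgkill, tgid, lwpid, signo);

   tkill is used above since it needs only the lwp id. */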
2001
2002 static void
2003 send_sigstop (struct lwp_info *lwp)
2004 {
2005 int pid;
2006
2007 pid = lwpid_of (lwp);
2008
2009 /* If we already have a pending stop signal for this lwp, don't
2010 send another. */
2011 if (lwp->stop_expected)
2012 {
2013 if (debug_threads)
2014 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2015
2016 return;
2017 }
2018
2019 if (debug_threads)
2020 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2021
2022 lwp->stop_expected = 1;
2023 kill_lwp (pid, SIGSTOP);
2024 }
2025
2026 static void
2027 send_sigstop_callback (struct inferior_list_entry *entry)
2028 {
2029 struct lwp_info *lwp = (struct lwp_info *) entry;
2030
2031 if (lwp->stopped)
2032 return;
2033
2034 send_sigstop (lwp);
2035 }
2036
2037 static void
2038 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2039 {
2040 /* It's dead, really. */
2041 lwp->dead = 1;
2042
2043 /* Store the exit status for later. */
2044 lwp->status_pending_p = 1;
2045 lwp->status_pending = wstat;
2046
2047 /* Prevent trying to stop it. */
2048 lwp->stopped = 1;
2049
2050 /* No further stops are expected from a dead lwp. */
2051 lwp->stop_expected = 0;
2052 }
2053
2054 static void
2055 wait_for_sigstop (struct inferior_list_entry *entry)
2056 {
2057 struct lwp_info *lwp = (struct lwp_info *) entry;
2058 struct thread_info *saved_inferior;
2059 int wstat;
2060 ptid_t saved_tid;
2061 ptid_t ptid;
2062 int pid;
2063
2064 if (lwp->stopped)
2065 {
2066 if (debug_threads)
2067 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2068 lwpid_of (lwp));
2069 return;
2070 }
2071
2072 saved_inferior = current_inferior;
2073 if (saved_inferior != NULL)
2074 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2075 else
2076 saved_tid = null_ptid; /* avoid bogus unused warning */
2077
2078 ptid = lwp->head.id;
2079
2080 if (debug_threads)
2081 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2082
2083 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2084
2085 /* If we stopped with a non-SIGSTOP signal, save it for later
2086 and record the pending SIGSTOP. If the process exited, just
2087 return. */
2088 if (WIFSTOPPED (wstat))
2089 {
2090 if (debug_threads)
2091 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2092 lwpid_of (lwp), WSTOPSIG (wstat));
2093
2094 if (WSTOPSIG (wstat) != SIGSTOP)
2095 {
2096 if (debug_threads)
2097 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2098 lwpid_of (lwp), wstat);
2099
2100 lwp->status_pending_p = 1;
2101 lwp->status_pending = wstat;
2102 }
2103 }
2104 else
2105 {
2106 if (debug_threads)
2107 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2108
2109 lwp = find_lwp_pid (pid_to_ptid (pid));
2110 if (lwp)
2111 {
2112 /* Leave this status pending for the next time we're able to
2113 report it. In the meantime, we'll report this lwp as
2114 dead to GDB, so GDB doesn't try to read registers and
2115 memory from it. This can only happen if this was the
2116 last thread of the process; otherwise, PID is removed
2117 from the thread tables before linux_wait_for_event
2118 returns. */
2119 mark_lwp_dead (lwp, wstat);
2120 }
2121 }
2122
2123 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2124 current_inferior = saved_inferior;
2125 else
2126 {
2127 if (debug_threads)
2128 fprintf (stderr, "Previously current thread died.\n");
2129
2130 if (non_stop)
2131 {
2132 /* We can't change the current inferior behind GDB's back,
2133 otherwise, a subsequent command may apply to the wrong
2134 process. */
2135 current_inferior = NULL;
2136 }
2137 else
2138 {
2139 /* Set a valid thread as current. */
2140 set_desired_inferior (0);
2141 }
2142 }
2143 }
2144
2145 static void
2146 stop_all_lwps (void)
2147 {
2148 stopping_threads = 1;
2149 for_each_inferior (&all_lwps, send_sigstop_callback);
2150 for_each_inferior (&all_lwps, wait_for_sigstop);
2151 stopping_threads = 0;
2152 }
2153
2154 /* Resume execution of the inferior process.
2155 If STEP is nonzero, single-step it.
2156 If SIGNAL is nonzero, give it that signal. */
2157
2158 static void
2159 linux_resume_one_lwp (struct lwp_info *lwp,
2160 int step, int signal, siginfo_t *info)
2161 {
2162 struct thread_info *saved_inferior;
2163
2164 if (lwp->stopped == 0)
2165 return;
2166
2167 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2168 user used the "jump" command, or "set $pc = foo"). */
2169 if (lwp->stop_pc != get_pc (lwp))
2170 {
2171 /* Collecting 'while-stepping' actions doesn't make sense
2172 anymore. */
2173 release_while_stepping_state_list (get_lwp_thread (lwp));
2174 }
2175
2176 /* If we have pending signals or status, and a new signal, enqueue the
2177 signal. Also enqueue the signal if we are waiting to reinsert a
2178 breakpoint; it will be picked up again below. */
2179 if (signal != 0
2180 && (lwp->status_pending_p || lwp->pending_signals != NULL
2181 || lwp->bp_reinsert != 0))
2182 {
2183 struct pending_signals *p_sig;
2184 p_sig = xmalloc (sizeof (*p_sig));
2185 p_sig->prev = lwp->pending_signals;
2186 p_sig->signal = signal;
2187 if (info == NULL)
2188 memset (&p_sig->info, 0, sizeof (siginfo_t));
2189 else
2190 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2191 lwp->pending_signals = p_sig;
2192 }
2193
2194 if (lwp->status_pending_p)
2195 {
2196 if (debug_threads)
2197 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2198 " has pending status\n",
2199 lwpid_of (lwp), step ? "step" : "continue", signal,
2200 lwp->stop_expected ? "expected" : "not expected");
2201 return;
2202 }
2203
2204 saved_inferior = current_inferior;
2205 current_inferior = get_lwp_thread (lwp);
2206
2207 if (debug_threads)
2208 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2209 lwpid_of (lwp), step ? "step" : "continue", signal,
2210 lwp->stop_expected ? "expected" : "not expected");
2211
2212 /* This bit needs some thinking about. If we get a signal that
2213 we must report while a single-step reinsert is still pending,
2214 we often end up resuming the thread. It might be better to
2215 (ew) allow a stack of pending events; then we could be sure that
2216 the reinsert happened right away and not lose any signals.
2217
2218 Making this stack would also shrink the window in which breakpoints are
2219 uninserted (see comment in linux_wait_for_lwp) but not enough for
2220 complete correctness, so it won't solve that problem. It may be
2221 worthwhile just to solve this one, however. */
2222 if (lwp->bp_reinsert != 0)
2223 {
2224 if (debug_threads)
2225 fprintf (stderr, " pending reinsert at 0x%s\n",
2226 paddress (lwp->bp_reinsert));
2227
2228 if (can_hardware_single_step ())
2229 {
2230 if (step == 0)
2231 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2232
2233 step = 1;
2234 }
2235
2236 /* Postpone any pending signal. It was enqueued above. */
2237 signal = 0;
2238 }
2239
2240 /* If we have while-stepping actions in this thread, set it stepping.
2241 If we have a signal to deliver, it may or may not be set to
2242 SIG_IGN; we don't know. Assume so, and allow collecting
2243 while-stepping into a signal handler. A possible smart thing to
2244 do would be to set an internal breakpoint at the signal return
2245 address, continue, and carry on catching this while-stepping
2246 action only when that breakpoint is hit. A future
2247 enhancement. */
2248 if (get_lwp_thread (lwp)->while_stepping != NULL
2249 && can_hardware_single_step ())
2250 {
2251 if (debug_threads)
2252 fprintf (stderr,
2253 "lwp %ld has a while-stepping action -> forcing step.\n",
2254 lwpid_of (lwp));
2255 step = 1;
2256 }
2257
2258 if (debug_threads && the_low_target.get_pc != NULL)
2259 {
2260 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2261 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
2262 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
2263 }
2264
2265 /* If we have pending signals, consume one unless we are trying to reinsert
2266 a breakpoint. */
2267 if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
2268 {
2269 struct pending_signals **p_sig;
2270
2271 p_sig = &lwp->pending_signals;
2272 while ((*p_sig)->prev != NULL)
2273 p_sig = &(*p_sig)->prev;
2274
2275 signal = (*p_sig)->signal;
2276 if ((*p_sig)->info.si_signo != 0)
2277 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
2278
2279 free (*p_sig);
2280 *p_sig = NULL;
2281 }
2282
2283 if (the_low_target.prepare_to_resume != NULL)
2284 the_low_target.prepare_to_resume (lwp);
2285
2286 regcache_invalidate_one ((struct inferior_list_entry *)
2287 get_lwp_thread (lwp));
2288 errno = 0;
2289 lwp->stopped = 0;
2290 lwp->stopped_by_watchpoint = 0;
2291 lwp->stepping = step;
2292 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2293 /* Coerce to a uintptr_t first to avoid potential gcc warning
2294 of coercing an 8 byte integer to a 4 byte pointer. */
2295 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
2296
2297 current_inferior = saved_inferior;
2298 if (errno)
2299 {
2300 /* ESRCH from ptrace either means that the thread was already
2301 running (an error) or that it is gone (a race condition). If
2302 it's gone, we will get a notification the next time we wait,
2303 so we can ignore the error. We could differentiate these
2304 two, but it's tricky without waiting; the thread still exists
2305 as a zombie, so sending it signal 0 would succeed. So just
2306 ignore ESRCH. */
2307 if (errno == ESRCH)
2308 return;
2309
2310 perror_with_name ("ptrace");
2311 }
2312 }
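/* A note on the pending_signals list used above (illustrative): new
   entries are pushed at the head (p_sig->prev points at the previous
   head), while the consume loop walks ->prev to the tail, so signals
   are delivered oldest-first. Sketch:

     enqueue S1, then S2   =>   head: S2 -> S1 -> NULL
     consume one           =>   delivers S1 (FIFO order)  */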
2313
2314 struct thread_resume_array
2315 {
2316 struct thread_resume *resume;
2317 size_t n;
2318 };
2319
2320 /* This function is called once per thread. We look up the thread
2321 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2322 resume request.
2323
2324 This algorithm is O(threads * resume elements), but the number of
2325 resume elements is small (and will remain small at least until
2326 GDB supports thread suspension). */
2327 static int
2328 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
2329 {
2330 struct lwp_info *lwp;
2331 struct thread_info *thread;
2332 int ndx;
2333 struct thread_resume_array *r;
2334
2335 thread = (struct thread_info *) entry;
2336 lwp = get_thread_lwp (thread);
2337 r = arg;
2338
2339 for (ndx = 0; ndx < r->n; ndx++)
2340 {
2341 ptid_t ptid = r->resume[ndx].thread;
2342 if (ptid_equal (ptid, minus_one_ptid)
2343 || ptid_equal (ptid, entry->id)
2344 || (ptid_is_pid (ptid)
2345 && (ptid_get_pid (ptid) == pid_of (lwp)))
2346 || (ptid_get_lwp (ptid) == -1
2347 && (ptid_get_pid (ptid) == pid_of (lwp))))
2348 {
2349 if (r->resume[ndx].kind == resume_stop
2350 && thread->last_resume_kind == resume_stop)
2351 {
2352 if (debug_threads)
2353 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2354 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2355 ? "stopped"
2356 : "stopping",
2357 lwpid_of (lwp));
2358
2359 continue;
2360 }
2361
2362 lwp->resume = &r->resume[ndx];
2363 thread->last_resume_kind = lwp->resume->kind;
2364 return 0;
2365 }
2366 }
2367
2368 /* No resume action for this thread. */
2369 lwp->resume = NULL;
2370
2371 return 0;
2372 }
2373
2374
2375 /* Set *FLAG_P if this lwp has an interesting status pending. */
2376 static int
2377 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
2378 {
2379 struct lwp_info *lwp = (struct lwp_info *) entry;
2380
2381 /* LWPs which will not be resumed are not interesting, because
2382 we might not wait for them next time through linux_wait. */
2383 if (lwp->resume == NULL)
2384 return 0;
2385
2386 if (lwp->status_pending_p)
2387 * (int *) flag_p = 1;
2388
2389 return 0;
2390 }
2391
2392 /* Return 1 if this lwp that GDB wants running is stopped at an
2393 internal breakpoint that we need to step over. It assumes that any
2394 required STOP_PC adjustment has already been propagated to the
2395 inferior's regcache. */
2396
2397 static int
2398 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
2399 {
2400 struct lwp_info *lwp = (struct lwp_info *) entry;
2401 struct thread_info *thread;
2402 struct thread_info *saved_inferior;
2403 CORE_ADDR pc;
2404
2405 /* LWPs which will not be resumed are not interesting, because we
2406 might not wait for them next time through linux_wait. */
2407
2408 if (!lwp->stopped)
2409 {
2410 if (debug_threads)
2411 fprintf (stderr,
2412 "Need step over [LWP %ld]? Ignoring, not stopped\n",
2413 lwpid_of (lwp));
2414 return 0;
2415 }
2416
2417 thread = get_lwp_thread (lwp);
2418
2419 if (thread->last_resume_kind == resume_stop)
2420 {
2421 if (debug_threads)
2422 fprintf (stderr,
2423 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
2424 lwpid_of (lwp));
2425 return 0;
2426 }
2427
2428 if (!lwp->need_step_over)
2429 {
2430 if (debug_threads)
2431 fprintf (stderr,
2432 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
2433 }
2434
2435 if (lwp->status_pending_p)
2436 {
2437 if (debug_threads)
2438 fprintf (stderr,
2439 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
2440 lwpid_of (lwp));
2441 return 0;
2442 }
2443
2444 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
2445 or we have. */
2446 pc = get_pc (lwp);
2447
2448 /* If the PC has changed since we stopped, then don't do anything,
2449 and let the breakpoint/tracepoint be hit. This happens if, for
2450 instance, GDB handled the decr_pc_after_break subtraction itself,
2451 GDB is OOL stepping this thread, or the user has issued a "jump"
2452 command, or poked the thread's registers herself. */
2453 if (pc != lwp->stop_pc)
2454 {
2455 if (debug_threads)
2456 fprintf (stderr,
2457 "Need step over [LWP %ld]? Cancelling, PC was changed. "
2458 "Old stop_pc was 0x%s, PC is now 0x%s\n",
2459 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
2460
2461 lwp->need_step_over = 0;
2462 return 0;
2463 }
2464
2465 saved_inferior = current_inferior;
2466 current_inferior = thread;
2467
2468 /* We can only step over breakpoints we know about. */
2469 if (breakpoint_here (pc))
2470 {
2471 /* Don't step over a breakpoint that GDB expects to hit
2472 though. */
2473 if (gdb_breakpoint_here (pc))
2474 {
2475 if (debug_threads)
2476 fprintf (stderr,
2477 "Need step over [LWP %ld]? yes, but found"
2478 " GDB breakpoint at 0x%s; skipping step over\n",
2479 lwpid_of (lwp), paddress (pc));
2480
2481 current_inferior = saved_inferior;
2482 return 0;
2483 }
2484 else
2485 {
2486 if (debug_threads)
2487 fprintf (stderr,
2488 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
2489 lwpid_of (lwp), paddress (pc));
2490
2491 /* We've found an lwp that needs stepping over --- return 1 so
2492 that find_inferior stops looking. */
2493 current_inferior = saved_inferior;
2494
2495 /* If the step over is cancelled, this is set again. */
2496 lwp->need_step_over = 0;
2497 return 1;
2498 }
2499 }
2500
2501 current_inferior = saved_inferior;
2502
2503 if (debug_threads)
2504 fprintf (stderr,
2505 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
2506 lwpid_of (lwp), paddress (pc));
2507
2508 return 0;
2509 }
2510
2511 /* Start a step-over operation on LWP. When LWP is stopped at a
2512 breakpoint, to make progress, we need to get the breakpoint out
2513 of the way. If we let other threads run while we do that, they may
2514 pass by the breakpoint location and miss hitting it. To avoid
2515 that, a step-over momentarily stops all threads while LWP is
2516 single-stepped with the breakpoint temporarily uninserted from
2517 the inferior. When the single-step finishes, we reinsert the
2518 breakpoint, and let all threads that are supposed to be running,
2519 run again.
2520
2521 On targets that don't support hardware single-step, we don't
2522 currently support full software single-stepping. Instead, we only
2523 support stepping over the thread event breakpoint, by asking the
2524 low target where to place a reinsert breakpoint. Since this
2525 routine assumes the breakpoint being stepped over is the thread
2526 event breakpoint, the return address of the current function is
2527 usually a good enough place to set the reinsert breakpoint. */
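/* For illustration, the overall step-over sequence implemented by
   start_step_over/finish_step_over is roughly (a sketch, not literal
   code):

     stop_all_lwps ();                       // freeze every LWP
     lwp->bp_reinsert = pc;                  // remember the breakpoint spot
     uninsert_breakpoints_at (pc);           // take the trap out of the way
     linux_resume_one_lwp (lwp, 1, 0, NULL); // hw single-step; on targets
                                             // without it, step == 0 and a
                                             // reinsert breakpoint is set
     ... wait for the step to finish ...
     finish_step_over (lwp);                 // reinsert breakpoint(s)
     proceed_all_lwps ();                    // set everyone running again
*/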
2528
2529 static int
2530 start_step_over (struct lwp_info *lwp)
2531 {
2532 struct thread_info *saved_inferior;
2533 CORE_ADDR pc;
2534 int step;
2535
2536 if (debug_threads)
2537 fprintf (stderr,
2538 "Starting step-over on LWP %ld. Stopping all threads\n",
2539 lwpid_of (lwp));
2540
2541 stop_all_lwps ();
2542
2543 if (debug_threads)
2544 fprintf (stderr, "Done stopping all threads for step-over.\n");
2545
2546 /* Note, we should always reach here with an already adjusted PC,
2547 either by GDB (if we're resuming due to GDB's request), or by our
2548 caller, if we just finished handling an internal breakpoint GDB
2549 shouldn't care about. */
2550 pc = get_pc (lwp);
2551
2552 saved_inferior = current_inferior;
2553 current_inferior = get_lwp_thread (lwp);
2554
2555 lwp->bp_reinsert = pc;
2556 uninsert_breakpoints_at (pc);
2557
2558 if (can_hardware_single_step ())
2559 {
2560 step = 1;
2561 }
2562 else
2563 {
2564 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
2565 set_reinsert_breakpoint (raddr);
2566 step = 0;
2567 }
2568
2569 current_inferior = saved_inferior;
2570
2571 linux_resume_one_lwp (lwp, step, 0, NULL);
2572
2573 /* Require next event from this LWP. */
2574 step_over_bkpt = lwp->head.id;
2575 return 1;
2576 }
2577
2578 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
2579 start_step_over, if still there, and delete any reinsert
2580 breakpoints we've set, on non hardware single-step targets. */
2581
2582 static int
2583 finish_step_over (struct lwp_info *lwp)
2584 {
2585 if (lwp->bp_reinsert != 0)
2586 {
2587 if (debug_threads)
2588 fprintf (stderr, "Finished step over.\n");
2589
2590 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2591 may be no breakpoint to reinsert there by now. */
2592 reinsert_breakpoints_at (lwp->bp_reinsert);
2593
2594 lwp->bp_reinsert = 0;
2595
2596 /* Delete any software-single-step reinsert breakpoints. No
2597 longer needed. We don't have to worry about other threads
2598 hitting this trap, and later not being able to explain it,
2599 because we were stepping over a breakpoint, and we held all
2600 threads but LWP stopped while doing that. */
2601 if (!can_hardware_single_step ())
2602 delete_reinsert_breakpoints ();
2603
2604 step_over_bkpt = null_ptid;
2605 return 1;
2606 }
2607 else
2608 return 0;
2609 }
2610
2611 /* This function is called once per thread. We check the thread's resume
2612 request, which will tell us whether to resume, step, or leave the thread
2613 stopped; and what signal, if any, it should be sent.
2614
2615 For threads which we aren't explicitly told otherwise, we preserve
2616 the stepping flag; this is used for stepping over gdbserver-placed
2617 breakpoints.
2618
2619 If pending_flags was set in any thread, we queue any needed
2620 signals, since we won't actually resume. We already have a pending
2621 event to report, so we don't need to preserve any step requests;
2622 they should be re-issued if necessary. */
2623
2624 static int
2625 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
2626 {
2627 struct lwp_info *lwp;
2628 struct thread_info *thread;
2629 int step;
2630 int leave_all_stopped = * (int *) arg;
2631 int leave_pending;
2632
2633 thread = (struct thread_info *) entry;
2634 lwp = get_thread_lwp (thread);
2635
2636 if (lwp->resume == NULL)
2637 return 0;
2638
2639 if (lwp->resume->kind == resume_stop)
2640 {
2641 if (debug_threads)
2642 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
2643
2644 if (!lwp->stopped)
2645 {
2646 if (debug_threads)
2647 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
2648
2649 /* Stop the thread, and wait for the event asynchronously,
2650 through the event loop. */
2651 send_sigstop (lwp);
2652 }
2653 else
2654 {
2655 if (debug_threads)
2656 fprintf (stderr, "already stopped LWP %ld\n",
2657 lwpid_of (lwp));
2658
2659 /* The LWP may have been stopped in an internal event that
2660 was not meant to be notified back to GDB (e.g., gdbserver
2661 breakpoint), so we should be reporting a stop event in
2662 this case too. */
2663
2664 /* If the thread already has a pending SIGSTOP, this is a
2665 no-op. Otherwise, something later will presumably resume
2666 the thread and this will cause it to cancel any pending
2667 operation, due to last_resume_kind == resume_stop. If
2668 the thread already has a pending status to report, we
2669 will still report it the next time we wait - see
2670 status_pending_p_callback. */
2671 send_sigstop (lwp);
2672 }
2673
2674 /* For stop requests, we're done. */
2675 lwp->resume = NULL;
2676 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
2677 return 0;
2678 }
2679
2680 /* If this thread which is about to be resumed has a pending status,
2681 then don't resume any threads - we can just report the pending
2682 status. Make sure to queue any signals that would otherwise be
2683 sent. In all-stop mode, we make this decision based on whether
2684 *any* thread has a pending status. If there's a thread that needs the
2685 step-over-breakpoint dance, then don't resume any other thread
2686 but that particular one. */
2687 leave_pending = (lwp->status_pending_p || leave_all_stopped);
2688
2689 if (!leave_pending)
2690 {
2691 if (debug_threads)
2692 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
2693
2694 step = (lwp->resume->kind == resume_step);
2695 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
2696 }
2697 else
2698 {
2699 if (debug_threads)
2700 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
2701
2702 /* If we have a new signal, enqueue the signal. */
2703 if (lwp->resume->sig != 0)
2704 {
2705 struct pending_signals *p_sig;
2706 p_sig = xmalloc (sizeof (*p_sig));
2707 p_sig->prev = lwp->pending_signals;
2708 p_sig->signal = lwp->resume->sig;
2709 memset (&p_sig->info, 0, sizeof (siginfo_t));
2710
2711 /* If this is the same signal we were previously stopped by,
2712 make sure to queue its siginfo. We can ignore the return
2713 value of ptrace; if it fails, we'll skip
2714 PTRACE_SETSIGINFO. */
2715 if (WIFSTOPPED (lwp->last_status)
2716 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
2717 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
2718
2719 lwp->pending_signals = p_sig;
2720 }
2721 }
2722
2723 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
2724 lwp->resume = NULL;
2725 return 0;
2726 }
2727
2728 static void
2729 linux_resume (struct thread_resume *resume_info, size_t n)
2730 {
2731 struct thread_resume_array array = { resume_info, n };
2732 struct lwp_info *need_step_over = NULL;
2733 int any_pending;
2734 int leave_all_stopped;
2735
2736 find_inferior (&all_threads, linux_set_resume_request, &array);
2737
2738 /* If there is a thread which would otherwise be resumed, which has
2739 a pending status, then don't resume any threads - we can just
2740 report the pending status. Make sure to queue any signals that
2741 would otherwise be sent. In non-stop mode, we'll apply this
2742 logic to each thread individually. We consume all pending events
2743 before considering whether to start a step-over (in all-stop). */
2744 any_pending = 0;
2745 if (!non_stop)
2746 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
2747
2748 /* If there is a thread which would otherwise be resumed, which is
2749 stopped at a breakpoint that needs stepping over, then don't
2750 resume any threads - have it step over the breakpoint with all
2751 other threads stopped, then resume all threads again. Make sure
2752 to queue any signals that would otherwise be delivered or
2753 queued. */
2754 if (!any_pending && supports_breakpoints ())
2755 need_step_over
2756 = (struct lwp_info *) find_inferior (&all_lwps,
2757 need_step_over_p, NULL);
2758
2759 leave_all_stopped = (need_step_over != NULL || any_pending);
2760
2761 if (debug_threads)
2762 {
2763 if (need_step_over != NULL)
2764 fprintf (stderr, "Not resuming all, need step over\n");
2765 else if (any_pending)
2766 fprintf (stderr,
2767 "Not resuming, all-stop and found "
2768 "an LWP with pending status\n");
2769 else
2770 fprintf (stderr, "Resuming, no pending status or step over needed\n");
2771 }
2772
2773 /* Even if we're leaving threads stopped, queue all signals we'd
2774 otherwise deliver. */
2775 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
2776
2777 if (need_step_over)
2778 start_step_over (need_step_over);
2779 }
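/* Summary of the decision above (illustrative):

     any_pending   need_step_over   effect
     -----------   --------------   ----------------------------------------
     yes           (not computed)   leave all stopped; report pending status
     no            non-NULL         leave all stopped; start_step_over
     no            NULL             resume each thread per its request
*/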
2780
2781 /* This function is called once per thread. We check the thread's
2782 last resume request, which will tell us whether to resume, step, or
2783 leave the thread stopped. Any signal the client requested to be
2784 delivered has already been enqueued at this point.
2785
2786 If any thread that GDB wants running is stopped at an internal
2787 breakpoint that needs stepping over, we start a step-over operation
2788 on that particular thread, and leave all others stopped. */
2789
2790 static void
2791 proceed_one_lwp (struct inferior_list_entry *entry)
2792 {
2793 struct lwp_info *lwp;
2794 struct thread_info *thread;
2795 int step;
2796
2797 lwp = (struct lwp_info *) entry;
2798
2799 if (debug_threads)
2800 fprintf (stderr,
2801 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
2802
2803 if (!lwp->stopped)
2804 {
2805 if (debug_threads)
2806 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
2807 return;
2808 }
2809
2810 thread = get_lwp_thread (lwp);
2811
2812 if (thread->last_resume_kind == resume_stop
2813 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
2814 {
2815 if (debug_threads)
2816 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
2817 lwpid_of (lwp));
2818 return;
2819 }
2820
2821 if (lwp->status_pending_p)
2822 {
2823 if (debug_threads)
2824 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
2825 lwpid_of (lwp));
2826 return;
2827 }
2828
2829 if (lwp->suspended)
2830 {
2831 if (debug_threads)
2832 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
2833 return;
2834 }
2835
2836 if (thread->last_resume_kind == resume_stop)
2837 {
2838 /* We haven't reported this LWP as stopped yet (otherwise, the
2839 last_status.kind check above would catch it, and we wouldn't
2840 reach here). This LWP may have been momentarily paused by a
2841 stop_all_lwps call while handling, for example, another LWP's
2842 step-over. In that case, the pending expected SIGSTOP signal
2843 that was queued at vCont;t handling time will have already
2844 been consumed by wait_for_sigstop, and so we need to requeue
2845 another one here. Note that if the LWP already has a SIGSTOP
2846 pending, this is a no-op. */
2847
2848 if (debug_threads)
2849 fprintf (stderr,
2850 "Client wants LWP %ld to stop. "
2851 "Making sure it has a SIGSTOP pending\n",
2852 lwpid_of (lwp));
2853
2854 send_sigstop (lwp);
2855 }
2856
2857 step = thread->last_resume_kind == resume_step;
2858 linux_resume_one_lwp (lwp, step, 0, NULL);
2859 }
2860
2861 /* When we finish a step-over, set threads running again. If there's
2862 another thread that may need a step-over, now's the time to start
2863 it. Eventually, we'll move all threads past their breakpoints. */
2864
2865 static void
2866 proceed_all_lwps (void)
2867 {
2868 struct lwp_info *need_step_over;
2869
2870 /* If there is a thread which would otherwise be resumed, which is
2871 stopped at a breakpoint that needs stepping over, then don't
2872 resume any threads - have it step over the breakpoint with all
2873 other threads stopped, then resume all threads again. */
2874
2875 if (supports_breakpoints ())
2876 {
2877 need_step_over
2878 = (struct lwp_info *) find_inferior (&all_lwps,
2879 need_step_over_p, NULL);
2880
2881 if (need_step_over != NULL)
2882 {
2883 if (debug_threads)
2884 fprintf (stderr, "proceed_all_lwps: found "
2885 "thread %ld needing a step-over\n",
2886 lwpid_of (need_step_over));
2887
2888 start_step_over (need_step_over);
2889 return;
2890 }
2891 }
2892
2893 if (debug_threads)
2894 fprintf (stderr, "Proceeding, no step-over needed\n");
2895
2896 for_each_inferior (&all_lwps, proceed_one_lwp);
2897 }
2898
2899 /* Stopped LWPs that the client wanted to be running, and that have
2900 no pending status, are set to run again, except for EXCEPT (if not
2901 NULL). This undoes a stop_all_lwps call. */
2902
2903 static void
2904 unstop_all_lwps (struct lwp_info *except)
2905 {
2906 if (debug_threads)
2907 {
2908 if (except)
2909 fprintf (stderr,
2910 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
2911 else
2912 fprintf (stderr,
2913 "unstopping all lwps\n");
2914 }
2915
2916 /* Make sure proceed_one_lwp doesn't try to resume this thread. */
2917 if (except != NULL)
2918 ++except->suspended;
2919
2920 for_each_inferior (&all_lwps, proceed_one_lwp);
2921
2922 if (except != NULL)
2923 --except->suspended;
2924 }
2925
2926 #ifdef HAVE_LINUX_USRREGS
2927
2928 int
2929 register_addr (int regnum)
2930 {
2931 int addr;
2932
2933 if (regnum < 0 || regnum >= the_low_target.num_regs)
2934 error ("Invalid register number %d.", regnum);
2935
2936 addr = the_low_target.regmap[regnum];
2937
2938 return addr;
2939 }
2940
2941 /* Fetch one register. */
2942 static void
2943 fetch_register (struct regcache *regcache, int regno)
2944 {
2945 CORE_ADDR regaddr;
2946 int i, size;
2947 char *buf;
2948 int pid;
2949
2950 if (regno >= the_low_target.num_regs)
2951 return;
2952 if ((*the_low_target.cannot_fetch_register) (regno))
2953 return;
2954
2955 regaddr = register_addr (regno);
2956 if (regaddr == -1)
2957 return;
2958
2959 pid = lwpid_of (get_thread_lwp (current_inferior));
2960 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2961 & - sizeof (PTRACE_XFER_TYPE));
2962 buf = alloca (size);
2963 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2964 {
2965 errno = 0;
2966 *(PTRACE_XFER_TYPE *) (buf + i) =
2967 ptrace (PTRACE_PEEKUSER, pid,
2968 /* Coerce to a uintptr_t first to avoid potential gcc warning
2969 of coercing an 8 byte integer to a 4 byte pointer. */
2970 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
2971 regaddr += sizeof (PTRACE_XFER_TYPE);
2972 if (errno != 0)
2973 error ("reading register %d: %s", regno, strerror (errno));
2974 }
2975
2976 if (the_low_target.supply_ptrace_register)
2977 the_low_target.supply_ptrace_register (regcache, regno, buf);
2978 else
2979 supply_register (regcache, regno, buf);
2980 }
2981
2982 /* Fetch all registers, or just one, from the child process. */
2983 static void
2984 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
2985 {
2986 if (regno == -1)
2987 for (regno = 0; regno < the_low_target.num_regs; regno++)
2988 fetch_register (regcache, regno);
2989 else
2990 fetch_register (regcache, regno);
2991 }
2992
2993 /* Store our register values back into the inferior.
2994 If REGNO is -1, do this for all registers.
2995 Otherwise, REGNO specifies which register (so we can save time). */
2996 static void
2997 usr_store_inferior_registers (struct regcache *regcache, int regno)
2998 {
2999 CORE_ADDR regaddr;
3000 int i, size;
3001 char *buf;
3002 int pid;
3003
3004 if (regno >= 0)
3005 {
3006 if (regno >= the_low_target.num_regs)
3007 return;
3008
3009 if ((*the_low_target.cannot_store_register) (regno) == 1)
3010 return;
3011
3012 regaddr = register_addr (regno);
3013 if (regaddr == -1)
3014 return;
3015 errno = 0;
3016 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3017 & - sizeof (PTRACE_XFER_TYPE);
3018 buf = alloca (size);
3019 memset (buf, 0, size);
3020
3021 if (the_low_target.collect_ptrace_register)
3022 the_low_target.collect_ptrace_register (regcache, regno, buf);
3023 else
3024 collect_register (regcache, regno, buf);
3025
3026 pid = lwpid_of (get_thread_lwp (current_inferior));
3027 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3028 {
3029 errno = 0;
3030 ptrace (PTRACE_POKEUSER, pid,
3031 /* Coerce to a uintptr_t first to avoid potential gcc warning
3032 about coercing an 8 byte integer to a 4 byte pointer. */
3033 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3034 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
3035 if (errno != 0)
3036 {
3037 /* At this point, ESRCH should mean the process is
3038 already gone, in which case we simply ignore attempts
3039 to change its registers. See also the related
3040 comment in linux_resume_one_lwp. */
3041 if (errno == ESRCH)
3042 return;
3043
3044 if ((*the_low_target.cannot_store_register) (regno) == 0)
3045 error ("writing register %d: %s", regno, strerror (errno));
3046 }
3047 regaddr += sizeof (PTRACE_XFER_TYPE);
3048 }
3049 }
3050 else
3051 for (regno = 0; regno < the_low_target.num_regs; regno++)
3052 usr_store_inferior_registers (regcache, regno);
3053 }
3054 #endif /* HAVE_LINUX_USRREGS */
3055
3056
3057
3058 #ifdef HAVE_LINUX_REGSETS
3059
3060 static int
3061 regsets_fetch_inferior_registers (struct regcache *regcache)
3062 {
3063 struct regset_info *regset;
3064 int saw_general_regs = 0;
3065 int pid;
3066 struct iovec iov;
3067
3068 regset = target_regsets;
3069
3070 pid = lwpid_of (get_thread_lwp (current_inferior));
3071 while (regset->size >= 0)
3072 {
3073 void *buf, *data;
3074 int nt_type, res;
3075
3076 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3077 {
3078 regset ++;
3079 continue;
3080 }
3081
3082 buf = xmalloc (regset->size);
3083
3084 nt_type = regset->nt_type;
3085 if (nt_type)
3086 {
3087 iov.iov_base = buf;
3088 iov.iov_len = regset->size;
3089 data = (void *) &iov;
3090 }
3091 else
3092 data = buf;
3093
3094 #ifndef __sparc__
3095 res = ptrace (regset->get_request, pid, nt_type, data);
3096 #else
3097 res = ptrace (regset->get_request, pid, data, nt_type);
3098 #endif
3099 if (res < 0)
3100 {
3101 if (errno == EIO)
3102 {
3103 /* If we get EIO on a regset, do not try it again for
3104 this process. */
3105 disabled_regsets[regset - target_regsets] = 1;
3106 free (buf);
3107 continue;
3108 }
3109 else
3110 {
3111 char s[256];
3112 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3113 pid);
3114 perror (s);
3115 }
3116 }
3117 else if (regset->type == GENERAL_REGS)
3118 saw_general_regs = 1;
3119 regset->store_function (regcache, buf);
3120 regset ++;
3121 free (buf);
3122 }
3123 if (saw_general_regs)
3124 return 0;
3125 else
3126 return 1;
3127 }
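/* Illustration of the nt_type path above: for regsets fetched with the
   PTRACE_GETREGSET request (Linux 2.6.33 and later), the kernel takes a
   struct iovec rather than a raw buffer. A sketch, assuming NT_PRSTATUS:

     struct iovec iov = { buf, regset->size };
     ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);

   On success the kernel updates iov.iov_len to the size it actually
   filled in. */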
3128
3129 static int
3130 regsets_store_inferior_registers (struct regcache *regcache)
3131 {
3132 struct regset_info *regset;
3133 int saw_general_regs = 0;
3134 int pid;
3135 struct iovec iov;
3136
3137 regset = target_regsets;
3138
3139 pid = lwpid_of (get_thread_lwp (current_inferior));
3140 while (regset->size >= 0)
3141 {
3142 void *buf, *data;
3143 int nt_type, res;
3144
3145 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3146 {
3147 regset ++;
3148 continue;
3149 }
3150
3151 buf = xmalloc (regset->size);
3152
3153 /* First fill the buffer with the current register set contents,
3154 in case there are any items in the kernel's regset that are
3155 not in gdbserver's regcache. */
3156
3157 nt_type = regset->nt_type;
3158 if (nt_type)
3159 {
3160 iov.iov_base = buf;
3161 iov.iov_len = regset->size;
3162 data = (void *) &iov;
3163 }
3164 else
3165 data = buf;
3166
3167 #ifndef __sparc__
3168 res = ptrace (regset->get_request, pid, nt_type, data);
3169 #else
3170 res = ptrace (regset->get_request, pid, data, nt_type);
3171 #endif
3172
3173 if (res == 0)
3174 {
3175 /* Then overlay our cached registers on that. */
3176 regset->fill_function (regcache, buf);
3177
3178 /* Only now do we write the register set. */
3179 #ifndef __sparc__
3180 res = ptrace (regset->set_request, pid, nt_type, data);
3181 #else
3182 res = ptrace (regset->set_request, pid, data, nt_type);
3183 #endif
3184 }
3185
3186 if (res < 0)
3187 {
3188 if (errno == EIO)
3189 {
3190 /* If we get EIO on a regset, do not try it again for
3191 this process. */
3192 disabled_regsets[regset - target_regsets] = 1;
3193 free (buf);
3194 continue;
3195 }
3196 else if (errno == ESRCH)
3197 {
3198 /* At this point, ESRCH should mean the process is
3199 already gone, in which case we simply ignore attempts
3200 to change its registers. See also the related
3201 comment in linux_resume_one_lwp. */
3202 free (buf);
3203 return 0;
3204 }
3205 else
3206 {
3207 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3208 }
3209 }
3210 else if (regset->type == GENERAL_REGS)
3211 saw_general_regs = 1;
3212 regset ++;
3213 free (buf);
3214 }
3215 if (saw_general_regs)
3216 return 0;
3217 else
3218 return 1;
3220 }
3221
3222 #endif /* HAVE_LINUX_REGSETS */
3223
3224
3225 void
3226 linux_fetch_registers (struct regcache *regcache, int regno)
3227 {
3228 #ifdef HAVE_LINUX_REGSETS
3229 if (regsets_fetch_inferior_registers (regcache) == 0)
3230 return;
3231 #endif
3232 #ifdef HAVE_LINUX_USRREGS
3233 usr_fetch_inferior_registers (regcache, regno);
3234 #endif
3235 }
3236
3237 void
3238 linux_store_registers (struct regcache *regcache, int regno)
3239 {
3240 #ifdef HAVE_LINUX_REGSETS
3241 if (regsets_store_inferior_registers (regcache) == 0)
3242 return;
3243 #endif
3244 #ifdef HAVE_LINUX_USRREGS
3245 usr_store_inferior_registers (regcache, regno);
3246 #endif
3247 }
3248
3249
3250 /* Copy LEN bytes from inferior's memory starting at MEMADDR
3251 to debugger memory starting at MYADDR. */
3252
3253 static int
3254 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
3255 {
3256 register int i;
3257 /* Round starting address down to longword boundary. */
3258 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3259 /* Round ending address up; get number of longwords that makes. */
3260 register int count
3261 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
3262 / sizeof (PTRACE_XFER_TYPE);
3263 /* Allocate buffer of that many longwords. */
3264 register PTRACE_XFER_TYPE *buffer
3265 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3266 int fd;
3267 char filename[64];
3268 int pid = lwpid_of (get_thread_lwp (current_inferior));
3269
3270 /* Try using /proc. Don't bother for transfers shorter than three words. */
3271 if (len >= 3 * sizeof (long))
3272 {
3273 /* We could keep this file open and cache it - possibly one per
3274 thread. That requires some juggling, but is even faster. */
3275 sprintf (filename, "/proc/%d/mem", pid);
3276 fd = open (filename, O_RDONLY | O_LARGEFILE);
3277 if (fd == -1)
3278 goto no_proc;
3279
3280 /* If pread64 is available, use it. It's faster if the kernel
3281 supports it (only one syscall), and it's 64-bit safe even on
3282 32-bit platforms (for instance, SPARC debugging a SPARC64
3283 application). */
3284 #ifdef HAVE_PREAD64
3285 if (pread64 (fd, myaddr, len, memaddr) != len)
3286 #else
3287 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
3288 #endif
3289 {
3290 close (fd);
3291 goto no_proc;
3292 }
3293
3294 close (fd);
3295 return 0;
3296 }
3297
3298 no_proc:
3299 /* Read all the longwords */
3300 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3301 {
3302 errno = 0;
3303 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3304 about coercing an 8 byte integer to a 4 byte pointer. */
3305 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
3306 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3307 if (errno)
3308 return errno;
3309 }
3310
3311 /* Copy appropriate bytes out of the buffer. */
3312 memcpy (myaddr,
3313 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
3314 len);
3315
3316 return 0;
3317 }
3318
3319 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3320 memory at MEMADDR. On failure (cannot write to the inferior)
3321 returns the value of errno. */
3322
3323 static int
3324 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
3325 {
3326 register int i;
3327 /* Round starting address down to longword boundary. */
3328 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3329 /* Round ending address up; get number of longwords that makes. */
3330 register int count
3331 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
3332 /* Allocate buffer of that many longwords. */
3333 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3334 int pid = lwpid_of (get_thread_lwp (current_inferior));
3335
3336 if (debug_threads)
3337 {
3338 /* Dump up to four bytes. */
3339 unsigned int val = * (unsigned int *) myaddr;
3340 if (len == 1)
3341 val = val & 0xff;
3342 else if (len == 2)
3343 val = val & 0xffff;
3344 else if (len == 3)
3345 val = val & 0xffffff;
3346 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
3347 val, (long)memaddr);
3348 }
3349
3350 /* Fill start and end extra bytes of buffer with existing memory data. */
3351
3352 errno = 0;
3353 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3354 about coercing an 8 byte integer to a 4 byte pointer. */
3355 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
3356 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3357 if (errno)
3358 return errno;
3359
3360 if (count > 1)
3361 {
3362 errno = 0;
3363 buffer[count - 1]
3364 = ptrace (PTRACE_PEEKTEXT, pid,
3365 /* Coerce to a uintptr_t first to avoid potential gcc warning
3366 about coercing an 8 byte integer to a 4 byte pointer. */
3367 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
3368 * sizeof (PTRACE_XFER_TYPE)),
3369 0);
3370 if (errno)
3371 return errno;
3372 }
3373
3374 /* Copy data to be written over corresponding part of buffer. */
3375
3376 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
3377
3378 /* Write the entire buffer. */
3379
3380 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3381 {
3382 errno = 0;
3383 ptrace (PTRACE_POKETEXT, pid,
3384 /* Coerce to a uintptr_t first to avoid potential gcc warning
3385 about coercing an 8 byte integer to a 4 byte pointer. */
3386 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
3387 (PTRACE_ARG4_TYPE) buffer[i]);
3388 if (errno)
3389 return errno;
3390 }
3391
3392 return 0;
3393 }
3394
3395 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
3396 static int linux_supports_tracefork_flag;
3397
3398 static void
3399 linux_enable_event_reporting (int pid)
3400 {
3401 if (!linux_supports_tracefork_flag)
3402 return;
3403
3404 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
3405 }
3406
3407 /* Helper functions for linux_test_for_tracefork, called via clone (). */
3408
3409 static int
3410 linux_tracefork_grandchild (void *arg)
3411 {
3412 _exit (0);
3413 }
3414
3415 #define STACK_SIZE 4096
3416
3417 static int
3418 linux_tracefork_child (void *arg)
3419 {
3420 ptrace (PTRACE_TRACEME, 0, 0, 0);
3421 kill (getpid (), SIGSTOP);
3422
3423 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3424
3425 if (fork () == 0)
3426 linux_tracefork_grandchild (NULL);
3427
3428 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3429
3430 #ifdef __ia64__
3431 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
3432 CLONE_VM | SIGCHLD, NULL);
3433 #else
3434 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
3435 CLONE_VM | SIGCHLD, NULL);
3436 #endif
3437
3438 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3439
3440 _exit (0);
3441 }
3442
3443 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3444 sure that we can enable the option, and that it had the desired
3445 effect. */
3446
3447 static void
3448 linux_test_for_tracefork (void)
3449 {
3450 int child_pid, ret, status;
3451 long second_pid;
3452 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3453 char *stack = xmalloc (STACK_SIZE * 4);
3454 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3455
3456 linux_supports_tracefork_flag = 0;
3457
3458 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3459
3460 child_pid = fork ();
3461 if (child_pid == 0)
3462 linux_tracefork_child (NULL);
3463
3464 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3465
3466 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
3467 #ifdef __ia64__
3468 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
3469 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3470 #else /* !__ia64__ */
3471 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
3472 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
3473 #endif /* !__ia64__ */
3474
3475 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3476
3477 if (child_pid == -1)
3478 perror_with_name ("clone");
3479
3480 ret = my_waitpid (child_pid, &status, 0);
3481 if (ret == -1)
3482 perror_with_name ("waitpid");
3483 else if (ret != child_pid)
3484 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
3485 if (! WIFSTOPPED (status))
3486 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
3487
3488 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
3489 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
3490 if (ret != 0)
3491 {
3492 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3493 if (ret != 0)
3494 {
3495 warning ("linux_test_for_tracefork: failed to kill child");
3496 return;
3497 }
3498
3499 ret = my_waitpid (child_pid, &status, 0);
3500 if (ret != child_pid)
3501 warning ("linux_test_for_tracefork: failed to wait for killed child");
3502 else if (!WIFSIGNALED (status))
3503 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
3504 "killed child", status);
3505
3506 return;
3507 }
3508
3509 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
3510 if (ret != 0)
3511 warning ("linux_test_for_tracefork: failed to resume child");
3512
3513 ret = my_waitpid (child_pid, &status, 0);
3514
3515 if (ret == child_pid && WIFSTOPPED (status)
3516 && status >> 16 == PTRACE_EVENT_FORK)
3517 {
3518 second_pid = 0;
3519 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
3520 if (ret == 0 && second_pid != 0)
3521 {
3522 int second_status;
3523
3524 linux_supports_tracefork_flag = 1;
3525 my_waitpid (second_pid, &second_status, 0);
3526 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
3527 if (ret != 0)
3528 warning ("linux_test_for_tracefork: failed to kill second child");
3529 my_waitpid (second_pid, &status, 0);
3530 }
3531 }
3532 else
3533 warning ("linux_test_for_tracefork: unexpected result from waitpid "
3534 "(%d, status 0x%x)", ret, status);
3535
3536 do
3537 {
3538 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
3539 if (ret != 0)
3540 warning ("linux_test_for_tracefork: failed to kill child");
3541 my_waitpid (child_pid, &status, 0);
3542 }
3543 while (WIFSTOPPED (status));
3544
3545 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3546 free (stack);
3547 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3548 }
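/* For reference, the fork event checked for above is reported as a
   SIGTRAP stop with the event code in the high bits of the wait
   status (an illustrative sketch of the decoding):

     if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
         && (status >> 16) == PTRACE_EVENT_FORK)
       ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);  // new child's pid
*/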
3549
3550
3551 static void
3552 linux_look_up_symbols (void)
3553 {
3554 #ifdef USE_THREAD_DB
3555 struct process_info *proc = current_process ();
3556
3557 if (proc->private->thread_db != NULL)
3558 return;
3559
3560 /* If the kernel supports tracing forks, then it also supports tracing
3561 clones, in which case we don't need to use the magic thread event
3562 breakpoint to learn about threads. */
3563 thread_db_init (!linux_supports_tracefork_flag);
3564 #endif
3565 }
3566
3567 static void
3568 linux_request_interrupt (void)
3569 {
3570 extern unsigned long signal_pid;
3571
3572 if (!ptid_equal (cont_thread, null_ptid)
3573 && !ptid_equal (cont_thread, minus_one_ptid))
3574 {
3575 struct lwp_info *lwp;
3576 int lwpid;
3577
3578 lwp = get_thread_lwp (current_inferior);
3579 lwpid = lwpid_of (lwp);
3580 kill_lwp (lwpid, SIGINT);
3581 }
3582 else
3583 kill_lwp (signal_pid, SIGINT);
3584 }
3585
3586 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3587 to debugger memory starting at MYADDR. */
3588
3589 static int
3590 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
3591 {
3592 char filename[PATH_MAX];
3593 int fd, n;
3594 int pid = lwpid_of (get_thread_lwp (current_inferior));
3595
3596 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
3597
3598 fd = open (filename, O_RDONLY);
3599 if (fd < 0)
3600 return -1;
3601
3602 if (offset != (CORE_ADDR) 0
3603 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3604 n = -1;
3605 else
3606 n = read (fd, myaddr, len);
3607
3608 close (fd);
3609
3610 return n;
3611 }
3612
3613 /* These breakpoint and watchpoint related wrapper functions simply
3614 pass on the function call if the target has registered a
3615 corresponding function. */
3616
3617 static int
3618 linux_insert_point (char type, CORE_ADDR addr, int len)
3619 {
3620 if (the_low_target.insert_point != NULL)
3621 return the_low_target.insert_point (type, addr, len);
3622 else
3623 /* Unsupported (see target.h). */
3624 return 1;
3625 }
3626
3627 static int
3628 linux_remove_point (char type, CORE_ADDR addr, int len)
3629 {
3630 if (the_low_target.remove_point != NULL)
3631 return the_low_target.remove_point (type, addr, len);
3632 else
3633 /* Unsupported (see target.h). */
3634 return 1;
3635 }
3636
3637 static int
3638 linux_stopped_by_watchpoint (void)
3639 {
3640 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3641
3642 return lwp->stopped_by_watchpoint;
3643 }
3644
3645 static CORE_ADDR
3646 linux_stopped_data_address (void)
3647 {
3648 struct lwp_info *lwp = get_thread_lwp (current_inferior);
3649
3650 return lwp->stopped_data_address;
3651 }
3652
3653 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3654 #if defined(__mcoldfire__)
3655 /* These should really be defined in the kernel's ptrace.h header. */
3656 #define PT_TEXT_ADDR 49*4
3657 #define PT_DATA_ADDR 50*4
3658 #define PT_TEXT_END_ADDR 51*4
3659 #endif
3660
3661 /* Under uClinux, programs are loaded at non-zero offsets, which we need
3662 to tell gdb about. */
3663
3664 static int
3665 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
3666 {
3667 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
3668 unsigned long text, text_end, data;
3669 int pid = lwpid_of (get_thread_lwp (current_inferior));
3670
3671 errno = 0;
3672
3673 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
3674 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
3675 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
3676
3677 if (errno == 0)
3678 {
3679 /* Both text and data offsets produced at compile-time (and so
3680 used by gdb) are relative to the beginning of the program,
3681 with the data segment immediately following the text segment.
3682 However, the actual runtime layout in memory may put the data
3683 somewhere else, so when we send gdb a data base-address, we
3684 use the real data base address and subtract the compile-time
3685 data base-address from it (which is just the length of the
3686 text segment). BSS immediately follows data in both
3687 cases. */
3688 *text_p = text;
3689 *data_p = data - (text_end - text);
3690
3691 return 1;
3692 }
3693 #endif
3694 return 0;
3695 }
3696 #endif
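/* Worked example for linux_read_offsets (illustrative numbers): if the
   text segment was loaded at 0x10000000 with text_end == 0x10004000,
   and the data segment at 0x20000000, we report text == 0x10000000 and
   data == 0x20000000 - 0x4000 == 0x1fffc000, since gdb's compile-time
   data addresses already include the 0x4000 of text before them. */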
3697
3698 static int
3699 compare_ints (const void *xa, const void *xb)
3700 {
3701 int a = *(const int *)xa;
3702 int b = *(const int *)xb;
3703
3704 return a - b;
3705 }
3706
3707 static int *
3708 unique (int *b, int *e)
3709 {
3710 int *d = b;
3711 while (++b != e)
3712 if (*d != *b)
3713 *++d = *b;
3714 return ++d;
3715 }
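/* Usage sketch for the two helpers above (illustrative): given
   int a[] = { 3, 1, 3, 2 }, after qsort (a, 4, sizeof (int),
   compare_ints) the array is { 1, 2, 3, 3 }, and unique (a, a + 4)
   returns a + 3 with the leading elements { 1, 2, 3 }. Note that
   unique requires a non-empty, sorted range. */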
3716
3717 /* Given PID, iterates over all threads in that process.
3718
3719 Information about each thread, in a format suitable for qXfer:osdata:threads,
3720 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3721 initialized, and the caller is responsible for finishing and appending '\0'
3722 to it.
3723
3724 The list of cores that threads are running on is assigned to *CORES, if it
3725 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3726 should free *CORES. */
3727
3728 static void
3729 list_threads (int pid, struct buffer *buffer, char **cores)
3730 {
3731 int count = 0;
3732 int allocated = 10;
3733 int *core_numbers = xmalloc (sizeof (int) * allocated);
3734 char pathname[128];
3735 DIR *dir;
3736 struct dirent *dp;
3737 struct stat statbuf;
3738
3739 sprintf (pathname, "/proc/%d/task", pid);
3740 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
3741 {
3742 dir = opendir (pathname);
3743 if (!dir)
3744 {
3745 free (core_numbers);
3746 return;
3747 }
3748
3749 while ((dp = readdir (dir)) != NULL)
3750 {
3751 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
3752
3753 if (lwp != 0)
3754 {
3755 int core = linux_core_of_thread (ptid_build (pid, lwp, 0));
3756
3757 if (core != -1)
3758 {
3759 char s[sizeof ("4294967295")];
3760 sprintf (s, "%d", core);
3761
3762 if (count == allocated)
3763 {
3764 allocated *= 2;
3765 core_numbers = realloc (core_numbers,
3766 sizeof (int) * allocated);
3767 }
3768 core_numbers[count++] = core;
3769 if (buffer)
3770 buffer_xml_printf (buffer,
3771 "<item>"
3772 "<column name=\"pid\">%d</column>"
3773 "<column name=\"tid\">%s</column>"
3774 "<column name=\"core\">%s</column>"
3775 "</item>", pid, dp->d_name, s);
3776 }
3777 else
3778 {
3779 if (buffer)
3780 buffer_xml_printf (buffer,
3781 "<item>"
3782 "<column name=\"pid\">%d</column>"
3783 "<column name=\"tid\">%s</column>"
3784 "</item>", pid, dp->d_name);
3785 }
3786 }
}

closedir (dir);
}
3789
3790 if (cores)
3791 {
3792 *cores = NULL;
3793 if (count > 0)
3794 {
3795 struct buffer buffer2;
3796 int *b;
3797 int *e;
3798 qsort (core_numbers, count, sizeof (int), compare_ints);
3799
3800 /* Remove duplicates. */
3801 b = core_numbers;
3802 e = unique (b, core_numbers + count);
3803
3804 buffer_init (&buffer2);
3805
3806 for (b = core_numbers; b != e; ++b)
3807 {
3808 char number[sizeof ("4294967295")];
3809 sprintf (number, "%u", *b);
3810 buffer_xml_printf (&buffer2, "%s%s",
3811 (b == core_numbers) ? "" : ",", number);
3812 }
3813 buffer_grow_str0 (&buffer2, "");
3814
3815 *cores = buffer_finish (&buffer2);
3816 }
3817 }
3818 free (core_numbers);
3819 }
3820
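/* Emit a qXfer:osdata:processes item for process PID into BUFFER:
   its pid, USERNAME, the command line read from /proc/PID/cmdline,
   and the set of cores its threads are running on.  */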
3821 static void
3822 show_process (int pid, const char *username, struct buffer *buffer)
3823 {
3824 char pathname[128];
3825 FILE *f;
3826 char cmd[MAXPATHLEN + 1];
3827
3828 sprintf (pathname, "/proc/%d/cmdline", pid);
3829
3830 if ((f = fopen (pathname, "r")) != NULL)
3831 {
3832 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
3833 if (len > 0)
3834 {
char *cores = NULL;
3836 int i;
3837 for (i = 0; i < len; i++)
3838 if (cmd[i] == '\0')
3839 cmd[i] = ' ';
3840 cmd[len] = '\0';
3841
3842 buffer_xml_printf (buffer,
3843 "<item>"
3844 "<column name=\"pid\">%d</column>"
3845 "<column name=\"user\">%s</column>"
3846 "<column name=\"command\">%s</column>",
3847 pid,
3848 username,
3849 cmd);
3850
3851 /* This only collects core numbers, and does not print threads. */
3852 list_threads (pid, NULL, &cores);
3853
3854 if (cores)
3855 {
3856 buffer_xml_printf (buffer,
3857 "<column name=\"cores\">%s</column>", cores);
3858 free (cores);
3859 }
3860
3861 buffer_xml_printf (buffer, "</item>");
3862 }
3863 fclose (f);
3864 }
3865 }
3866
3867 static int
3868 linux_qxfer_osdata (const char *annex,
3869 unsigned char *readbuf, unsigned const char *writebuf,
3870 CORE_ADDR offset, int len)
3871 {
/* We take the snapshot of the process/thread list when the object
   starts to be read (OFFSET == 0), and discard it once the reader
   has consumed it completely.  */
3874 static const char *buf;
3875 static long len_avail = -1;
3876 static struct buffer buffer;
3877 int processes = 0;
3878 int threads = 0;
3879
3880 DIR *dirp;
3881
3882 if (strcmp (annex, "processes") == 0)
3883 processes = 1;
3884 else if (strcmp (annex, "threads") == 0)
3885 threads = 1;
3886 else
3887 return 0;
3888
3889 if (!readbuf || writebuf)
3890 return 0;
3891
3892 if (offset == 0)
3893 {
3894 if (len_avail != -1 && len_avail != 0)
3895 buffer_free (&buffer);
3896 len_avail = 0;
3897 buf = NULL;
3898 buffer_init (&buffer);
3899 if (processes)
3900 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
3901 else if (threads)
3902 buffer_grow_str (&buffer, "<osdata type=\"threads\">");
3903
3904 dirp = opendir ("/proc");
3905 if (dirp)
3906 {
3907 struct dirent *dp;
3908 while ((dp = readdir (dirp)) != NULL)
3909 {
3910 struct stat statbuf;
3911 char procentry[sizeof ("/proc/4294967295")];
3912
3913 if (!isdigit (dp->d_name[0])
3914 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
3915 continue;
3916
3917 sprintf (procentry, "/proc/%s", dp->d_name);
3918 if (stat (procentry, &statbuf) == 0
3919 && S_ISDIR (statbuf.st_mode))
3920 {
3921 int pid = (int) strtoul (dp->d_name, NULL, 10);
3922
3923 if (processes)
3924 {
3925 struct passwd *entry = getpwuid (statbuf.st_uid);
3926 show_process (pid, entry ? entry->pw_name : "?", &buffer);
3927 }
3928 else if (threads)
3929 {
3930 list_threads (pid, &buffer, NULL);
3931 }
3932 }
3933 }
3934
3935 closedir (dirp);
3936 }
3937 buffer_grow_str0 (&buffer, "</osdata>\n");
3938 buf = buffer_finish (&buffer);
3939 len_avail = strlen (buf);
3940 }
3941
3942 if (offset >= len_avail)
3943 {
3944 /* Done. Get rid of the data. */
3945 buffer_free (&buffer);
3946 buf = NULL;
3947 len_avail = 0;
3948 return 0;
3949 }
3950
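/* Return the requested slice of the snapshot, clamping LEN to the
   bytes that remain past OFFSET.  */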
3951 if (len > len_avail - offset)
3952 len = len_avail - offset;
3953 memcpy (readbuf, buf + offset, len);
3954
3955 return len;
3956 }
3957
/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  If DIRECTION is 0, convert
   the native SIGINFO into INF_SIGINFO; if DIRECTION is 1, convert
   INF_SIGINFO back into the native SIGINFO.  */
3960
3961 static void
3962 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3963 {
3964 int done = 0;
3965
3966 if (the_low_target.siginfo_fixup != NULL)
3967 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3968
3969 /* If there was no callback, or the callback didn't do anything,
3970 then just do a straight memcpy. */
3971 if (!done)
3972 {
3973 if (direction == 1)
3974 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3975 else
3976 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3977 }
3978 }
3979
3980 static int
3981 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
3982 unsigned const char *writebuf, CORE_ADDR offset, int len)
3983 {
3984 int pid;
3985 struct siginfo siginfo;
3986 char inf_siginfo[sizeof (struct siginfo)];
3987
3988 if (current_inferior == NULL)
3989 return -1;
3990
3991 pid = lwpid_of (get_thread_lwp (current_inferior));
3992
3993 if (debug_threads)
3994 fprintf (stderr, "%s siginfo for lwp %d.\n",
3995 readbuf != NULL ? "Reading" : "Writing",
3996 pid);
3997
3998 if (offset > sizeof (siginfo))
3999 return -1;
4000
4001 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4002 return -1;
4003
4004 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4005 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4006 inferior with a 64-bit GDBSERVER should look the same as debugging it
4007 with a 32-bit GDBSERVER, we need to convert it. */
4008 siginfo_fixup (&siginfo, inf_siginfo, 0);
4009
4010 if (offset + len > sizeof (siginfo))
4011 len = sizeof (siginfo) - offset;
4012
4013 if (readbuf != NULL)
4014 memcpy (readbuf, inf_siginfo + offset, len);
4015 else
4016 {
4017 memcpy (inf_siginfo + offset, writebuf, len);
4018
4019 /* Convert back to ptrace layout before flushing it out. */
4020 siginfo_fixup (&siginfo, inf_siginfo, 1);
4021
4022 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4023 return -1;
4024 }
4025
4026 return len;
4027 }
4028
/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
   it notifies us that a child has changed state; it also serves as
   the handler for the sigsuspend in my_waitpid.  */
4032
4033 static void
4034 sigchld_handler (int signo)
4035 {
4036 int old_errno = errno;
4037
4038 if (debug_threads)
4039 /* fprintf is not async-signal-safe, so call write directly. */
4040 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
4041
4042 if (target_is_async_p ())
4043 async_file_mark (); /* trigger a linux_wait */
4044
4045 errno = old_errno;
4046 }
4047
4048 static int
4049 linux_supports_non_stop (void)
4050 {
4051 return 1;
4052 }
4053
4054 static int
4055 linux_async (int enable)
4056 {
4057 int previous = (linux_event_pipe[0] != -1);
4058
4059 if (debug_threads)
4060 fprintf (stderr, "linux_async (%d), previous=%d\n",
4061 enable, previous);
4062
4063 if (previous != enable)
4064 {
4065 sigset_t mask;
4066 sigemptyset (&mask);
4067 sigaddset (&mask, SIGCHLD);
4068
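/* Block SIGCHLD while we toggle async mode, so that sigchld_handler
   never observes a half-installed event pipe.  */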
4069 sigprocmask (SIG_BLOCK, &mask, NULL);
4070
4071 if (enable)
4072 {
4073 if (pipe (linux_event_pipe) == -1)
4074 fatal ("creating event pipe failed.");
4075
4076 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4077 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4078
4079 /* Register the event loop handler. */
4080 add_file_handler (linux_event_pipe[0],
4081 handle_target_event, NULL);
4082
4083 /* Always trigger a linux_wait. */
4084 async_file_mark ();
4085 }
4086 else
4087 {
4088 delete_file_handler (linux_event_pipe[0]);
4089
4090 close (linux_event_pipe[0]);
4091 close (linux_event_pipe[1]);
4092 linux_event_pipe[0] = -1;
4093 linux_event_pipe[1] = -1;
4094 }
4095
4096 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4097 }
4098
4099 return previous;
4100 }
4101
4102 static int
4103 linux_start_non_stop (int nonstop)
4104 {
4105 /* Register or unregister from event-loop accordingly. */
4106 linux_async (nonstop);
4107 return 0;
4108 }
4109
4110 static int
4111 linux_supports_multi_process (void)
4112 {
4113 return 1;
4114 }
4115
4116
/* Enumerate spufs IDs for process PID: scan /proc/PID/fd for
   descriptors that refer to spufs directories and store each such
   descriptor number as a 32-bit context ID into BUF, honoring the
   OFFSET/LEN window.  Returns the number of bytes written.  */
4118 static int
4119 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4120 {
4121 int pos = 0;
4122 int written = 0;
4123 char path[128];
4124 DIR *dir;
4125 struct dirent *entry;
4126
4127 sprintf (path, "/proc/%ld/fd", pid);
4128 dir = opendir (path);
4129 if (!dir)
4130 return -1;
4131
4132 rewinddir (dir);
4133 while ((entry = readdir (dir)) != NULL)
4134 {
4135 struct stat st;
4136 struct statfs stfs;
4137 int fd;
4138
/* atoi returns 0 for the "." and ".." entries; descriptor 0 is
   skipped along with them.  */
fd = atoi (entry->d_name);
if (fd == 0)
continue;
4142
4143 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4144 if (stat (path, &st) != 0)
4145 continue;
4146 if (!S_ISDIR (st.st_mode))
4147 continue;
4148
4149 if (statfs (path, &stfs) != 0)
4150 continue;
4151 if (stfs.f_type != SPUFS_MAGIC)
4152 continue;
4153
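/* Each context ID occupies 4 bytes; copy it out only if it lies
   entirely within the requested [OFFSET, OFFSET + LEN) window.  */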
4154 if (pos >= offset && pos + 4 <= offset + len)
4155 {
4156 *(unsigned int *)(buf + pos - offset) = fd;
4157 written += 4;
4158 }
4159 pos += 4;
4160 }
4161
4162 closedir (dir);
4163 return written;
4164 }
4165
4166 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4167 object type, using the /proc file system. */
4168 static int
4169 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4170 unsigned const char *writebuf,
4171 CORE_ADDR offset, int len)
4172 {
4173 long pid = lwpid_of (get_thread_lwp (current_inferior));
4174 char buf[128];
4175 int fd = 0;
4176 int ret = 0;
4177
4178 if (!writebuf && !readbuf)
4179 return -1;
4180
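/* An empty annex is a request for the list of SPU context IDs.  */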
4181 if (!*annex)
4182 {
4183 if (!readbuf)
4184 return -1;
4185 else
4186 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4187 }
4188
4189 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
if (fd < 0)
return -1;
4193
4194 if (offset != 0
4195 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4196 {
4197 close (fd);
4198 return 0;
4199 }
4200
4201 if (writebuf)
4202 ret = write (fd, writebuf, (size_t) len);
4203 else
4204 ret = read (fd, readbuf, (size_t) len);
4205
4206 close (fd);
4207 return ret;
4208 }
4209
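/* Return the core/CPU on which the thread PTID last ran, parsed
   from /proc/PID/task/LWP/stat, or -1 on failure.  */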
4210 static int
4211 linux_core_of_thread (ptid_t ptid)
4212 {
char filename[sizeof ("/proc//task//stat")
+ 2 * 20 /* 20 decimal digits cover each of the two IDs,
            up to 2^64.  */
+ 1];
4216 FILE *f;
4217 char *content = NULL;
4218 char *p;
4219 char *ts = 0;
4220 int content_read = 0;
4221 int i;
4222 int core;
4223
4224 sprintf (filename, "/proc/%d/task/%ld/stat",
4225 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4226 f = fopen (filename, "r");
4227 if (!f)
4228 return -1;
4229
4230 for (;;)
4231 {
int n;
char *new_content = realloc (content, content_read + 1024);

if (new_content == NULL)
{
/* Give up rather than dereference NULL below; the caller
   treats -1 as "core unknown".  */
free (content);
fclose (f);
return -1;
}
content = new_content;
4234 n = fread (content + content_read, 1, 1024, f);
4235 content_read += n;
4236 if (n < 1024)
4237 {
4238 content[content_read] = '\0';
4239 break;
4240 }
4241 }
4242
/* Skip past the ")" that ends the command name and the following
   space.  Use strrchr so that parentheses inside the command name
   itself don't confuse the parse; every later field is numeric.  */
p = strrchr (content, ')') + 2;
4245
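/* P now points at the third field of the stat line; step forward to
   field 39, the CPU the thread last executed on.  */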
4246 p = strtok_r (p, " ", &ts);
4247 for (i = 0; i != 36; ++i)
4248 p = strtok_r (NULL, " ", &ts);
4249
if (sscanf (p, "%d", &core) != 1)
core = -1;
4252
4253 free (content);
4254 fclose (f);
4255
4256 return core;
4257 }
4258
4259 static void
4260 linux_process_qsupported (const char *query)
4261 {
4262 if (the_low_target.process_qsupported != NULL)
4263 the_low_target.process_qsupported (query);
4264 }
4265
4266 static int
4267 linux_supports_tracepoints (void)
4268 {
if (the_low_target.supports_tracepoints == NULL)
4270 return 0;
4271
4272 return (*the_low_target.supports_tracepoints) ();
4273 }
4274
4275 static CORE_ADDR
4276 linux_read_pc (struct regcache *regcache)
4277 {
4278 if (the_low_target.get_pc == NULL)
4279 return 0;
4280
4281 return (*the_low_target.get_pc) (regcache);
4282 }
4283
4284 static void
4285 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4286 {
4287 gdb_assert (the_low_target.set_pc != NULL);
4288
4289 (*the_low_target.set_pc) (regcache, pc);
4290 }
4291
4292 static int
4293 linux_thread_stopped (struct thread_info *thread)
4294 {
4295 return get_thread_lwp (thread)->stopped;
4296 }
4297
4298 /* This exposes stop-all-threads functionality to other modules. */
4299
4300 static void
4301 linux_pause_all (void)
4302 {
4303 stop_all_lwps ();
4304 }
4305
4306 static struct target_ops linux_target_ops = {
4307 linux_create_inferior,
4308 linux_attach,
4309 linux_kill,
4310 linux_detach,
4311 linux_mourn,
4312 linux_join,
4313 linux_thread_alive,
4314 linux_resume,
4315 linux_wait,
4316 linux_fetch_registers,
4317 linux_store_registers,
4318 linux_read_memory,
4319 linux_write_memory,
4320 linux_look_up_symbols,
4321 linux_request_interrupt,
4322 linux_read_auxv,
4323 linux_insert_point,
4324 linux_remove_point,
4325 linux_stopped_by_watchpoint,
4326 linux_stopped_data_address,
4327 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4328 linux_read_offsets,
4329 #else
4330 NULL,
4331 #endif
4332 #ifdef USE_THREAD_DB
4333 thread_db_get_tls_address,
4334 #else
4335 NULL,
4336 #endif
4337 linux_qxfer_spu,
4338 hostio_last_error_from_errno,
4339 linux_qxfer_osdata,
4340 linux_xfer_siginfo,
4341 linux_supports_non_stop,
4342 linux_async,
4343 linux_start_non_stop,
4344 linux_supports_multi_process,
4345 #ifdef USE_THREAD_DB
4346 thread_db_handle_monitor_command,
4347 #else
4348 NULL,
4349 #endif
4350 linux_core_of_thread,
4351 linux_process_qsupported,
4352 linux_supports_tracepoints,
4353 linux_read_pc,
4354 linux_write_pc,
4355 linux_thread_stopped,
4356 linux_pause_all,
4357 NULL, /* get_tib_address (Windows OS specific). */
4358 };
4359
4360 static void
linux_init_signals (void)
4362 {
4363 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
4364 to find what the cancel signal actually is. */
4365 #ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
4366 signal (__SIGRTMIN+1, SIG_IGN);
4367 #endif
4368 }
4369
4370 void
4371 initialize_low (void)
4372 {
4373 struct sigaction sigchld_action;
4374 memset (&sigchld_action, 0, sizeof (sigchld_action));
4375 set_target_ops (&linux_target_ops);
4376 set_breakpoint_data (the_low_target.breakpoint,
4377 the_low_target.breakpoint_len);
4378 linux_init_signals ();
4379 linux_test_for_tracefork ();
4380 #ifdef HAVE_LINUX_REGSETS
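/* Count the target regsets, whose list ends with a sentinel entry
   of negative size, and allocate one flag per regset so that any
   the kernel rejects can be disabled at runtime.  */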
4381 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
4382 ;
4383 disabled_regsets = xmalloc (num_regsets);
4384 #endif
4385
4386 sigchld_action.sa_handler = sigchld_handler;
4387 sigemptyset (&sigchld_action.sa_mask);
4388 sigchld_action.sa_flags = SA_RESTART;
4389 sigaction (SIGCHLD, &sigchld_action, NULL);
4390 }