* i387-fp.c, linux-arm-low.c, linux-cris-low.c,
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include <signal.h>
28 #include <sys/ioctl.h>
29 #include <fcntl.h>
30 #include <string.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <errno.h>
34 #include <sys/syscall.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <pwd.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40
41 #ifndef PTRACE_GETSIGINFO
42 # define PTRACE_GETSIGINFO 0x4202
43 # define PTRACE_SETSIGINFO 0x4203
44 #endif
45
46 #ifndef O_LARGEFILE
47 #define O_LARGEFILE 0
48 #endif
49
50 /* If the system headers did not provide the constants, hard-code the normal
51 values. */
52 #ifndef PTRACE_EVENT_FORK
53
54 #define PTRACE_SETOPTIONS 0x4200
55 #define PTRACE_GETEVENTMSG 0x4201
56
57 /* options set using PTRACE_SETOPTIONS */
58 #define PTRACE_O_TRACESYSGOOD 0x00000001
59 #define PTRACE_O_TRACEFORK 0x00000002
60 #define PTRACE_O_TRACEVFORK 0x00000004
61 #define PTRACE_O_TRACECLONE 0x00000008
62 #define PTRACE_O_TRACEEXEC 0x00000010
63 #define PTRACE_O_TRACEVFORKDONE 0x00000020
64 #define PTRACE_O_TRACEEXIT 0x00000040
65
66 /* Wait extended result codes for the above trace options. */
67 #define PTRACE_EVENT_FORK 1
68 #define PTRACE_EVENT_VFORK 2
69 #define PTRACE_EVENT_CLONE 3
70 #define PTRACE_EVENT_EXEC 4
71 #define PTRACE_EVENT_VFORK_DONE 5
72 #define PTRACE_EVENT_EXIT 6
73
74 #endif /* PTRACE_EVENT_FORK */
75
76 /* We can't always assume that this flag is available, but all systems
77 with the ptrace event handlers also have __WALL, so it's safe to use
78 in some contexts. */
79 #ifndef __WALL
80 #define __WALL 0x40000000 /* Wait for any child. */
81 #endif
82
83 #ifdef __UCLIBC__
84 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
85 #define HAS_NOMMU
86 #endif
87 #endif
88
89 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
90 representation of the thread ID.
91
92 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
93 the same as the LWP ID. */
94
95 struct inferior_list all_lwps;
96
97 /* A list of all unknown processes which receive stop signals. Some other
98 process will presumably claim each of these as forked children
99 momentarily. */
100
101 struct inferior_list stopped_pids;
102
103 /* FIXME this is a bit of a hack, and could be removed. */
104 int stopping_threads;
105
106 /* FIXME make into a target method? */
107 int using_threads = 1;
108 static int thread_db_active;
109
110 static int must_set_ptrace_flags;
111
112 /* This flag is true iff we've just created or attached to a new inferior
113 but it has not stopped yet. As soon as it does, we need to call the
114 low target's arch_setup callback. */
115 static int new_inferior;
116
117 static void linux_resume_one_lwp (struct inferior_list_entry *entry,
118 int step, int signal, siginfo_t *info);
119 static void linux_resume (struct thread_resume *resume_info);
120 static void stop_all_lwps (void);
121 static int linux_wait_for_event (struct thread_info *child);
122 static int check_removed_breakpoint (struct lwp_info *event_child);
123 static void *add_lwp (unsigned long pid);
124 static int my_waitpid (int pid, int *status, int flags);
125
126 struct pending_signals
127 {
128 int signal;
129 siginfo_t info;
130 struct pending_signals *prev;
131 };
132
133 #define PTRACE_ARG3_TYPE long
134 #define PTRACE_XFER_TYPE long
135
136 #ifdef HAVE_LINUX_REGSETS
137 static char *disabled_regsets;
138 static int num_regsets;
139 #endif
140
141 #define pid_of(proc) ((proc)->head.id)
142
143 /* FIXME: Delete eventually. */
144 #define inferior_pid (pid_of (get_thread_lwp (current_inferior)))
145
/* Handle a ptrace extended event for EVENT_CHILD; WSTAT is the raw
   wait status, whose upper 16 bits carry the event code.  Only
   PTRACE_EVENT_CLONE is handled: the new LWP is registered and either
   resumed or left stopped, depending on whether we are in the middle
   of stopping all threads.  EVENT_CHILD itself is always resumed.  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      /* Fetch the LWP id of the new clone child from the kernel.  */
      ptrace (PTRACE_GETEVENTMSG, inferior_pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      /* Request clone events from the new LWP as well, so that its own
	 thread creations are also reported to us.  */
      ptrace (PTRACE_SETOPTIONS, new_pid, 0, PTRACE_O_TRACECLONE);

      new_lwp = (struct lwp_info *) add_lwp (new_pid);
      add_thread (new_pid, new_lwp, new_pid);
      new_thread_notify (thread_id_to_gdb_id (new_lwp->lwpid));

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads)
	    new_lwp->stopped = 1;
	  else
	    ptrace (PTRACE_CONT, new_pid, 0, 0);
	}
      else
	{
	  /* The SIGSTOP is still on its way; remember to swallow it
	     later, and deal with the signal we actually got now.  */
	  new_lwp->stop_expected = 1;
	  if (stopping_threads)
	    {
	      /* Record the signal so it is reported once the all-stop
		 completes, and mark the LWP stopped.  */
	      new_lwp->stopped = 1;
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    ptrace (PTRACE_CONT, new_pid, 0, WSTOPSIG (status));
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (&event_child->head,
			    event_child->stepping, 0, NULL);
    }
}
213
214 /* This function should only be called if the process got a SIGTRAP.
215 The SIGTRAP could mean several things.
216
217 On i386, where decr_pc_after_break is non-zero:
218 If we were single-stepping this process using PTRACE_SINGLESTEP,
219 we will get only the one SIGTRAP (even if the instruction we
220 stepped over was a breakpoint). The value of $eip will be the
221 next instruction.
222 If we continue the process using PTRACE_CONT, we will get a
223 SIGTRAP when we hit a breakpoint. The value of $eip will be
224 the instruction after the breakpoint (i.e. needs to be
225 decremented). If we report the SIGTRAP to GDB, we must also
226 report the undecremented PC. If we cancel the SIGTRAP, we
227 must resume at the decremented PC.
228
229 (Presumably, not yet tested) On a non-decr_pc_after_break machine
230 with hardware or kernel single-step:
231 If we single-step over a breakpoint instruction, our PC will
232 point at the following instruction. If we continue and hit a
233 breakpoint instruction, our PC will point at the breakpoint
234 instruction. */
235
236 static CORE_ADDR
237 get_stop_pc (void)
238 {
239 CORE_ADDR stop_pc = (*the_low_target.get_pc) ();
240
241 if (get_thread_lwp (current_inferior)->stepping)
242 return stop_pc;
243 else
244 return stop_pc - the_low_target.decr_pc_after_break;
245 }
246
247 static void *
248 add_lwp (unsigned long pid)
249 {
250 struct lwp_info *lwp;
251
252 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
253 memset (lwp, 0, sizeof (*lwp));
254
255 lwp->head.id = pid;
256 lwp->lwpid = pid;
257
258 add_inferior_to_list (&all_lwps, &lwp->head);
259
260 return lwp;
261 }
262
263 /* Start an inferior process and returns its pid.
264 ALLARGS is a vector of program-name and args. */
265
static int
linux_create_inferior (char *program, char **allargs)
{
  void *new_lwp;
  int pid;

  /* On no-MMU uClibc systems fork() is unavailable, so use vfork();
     the child only calls exec-family functions below, which is safe
     under vfork semantics.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* In the child: request tracing by our parent before exec.  */
      ptrace (PTRACE_TRACEME, 0, 0, 0);

      /* Reset this signal's disposition to the default; presumably it
	 is used internally by the thread library and an inherited
	 non-default disposition could confuse the inferior -- TODO
	 confirm the exact rationale.  */
      signal (__SIGRTMIN + 1, SIG_DFL);

      /* Put the inferior in its own process group.  */
      setpgid (0, 0);

      /* Try PROGRAM as given first; fall back to a PATH search only
	 if it was not found.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      /* exec failed; report and exit with a distinctive status.  */
      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  /* In the parent: register the new process as an LWP and thread.
     The ptrace options are set lazily (see must_set_ptrace_flags) the
     first time the child stops.  */
  new_lwp = add_lwp (pid);
  add_thread (pid, new_lwp, pid);
  must_set_ptrace_flags = 1;
  new_inferior = 1;

  return pid;
}
305
306 /* Attach to an inferior process. */
307
void
linux_attach_lwp (unsigned long pid)
{
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, pid, 0, 0) != 0)
    {
      if (all_threads.head != NULL)
	{
	  /* If we fail to attach to an LWP, just warn.  The LWP may
	     have exited between enumeration and attach.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", pid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}
      else
	/* If we fail to attach to a process, report an error.  */
	error ("Cannot attach to process %ld: %s (%d)\n", pid,
	       strerror (errno), errno);
    }

  /* FIXME: This intermittently fails.
     We need to wait for SIGSTOP first.  */
  ptrace (PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACECLONE);

  new_lwp = (struct lwp_info *) add_lwp (pid);
  add_thread (pid, new_lwp, pid);
  new_thread_notify (thread_id_to_gdb_id (new_lwp->lwpid));

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the process.
	This is handled below by setting stop_expected = 1.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach clear stop_expected after
	we return.
	??? If the process already has several threads we leave the other
	threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  if (! stopping_threads)
    new_lwp->stop_expected = 1;
}
371
372 int
373 linux_attach (unsigned long pid)
374 {
375 struct lwp_info *lwp;
376
377 linux_attach_lwp (pid);
378
379 /* Don't ignore the initial SIGSTOP if we just attached to this process.
380 It will be collected by wait shortly. */
381 lwp = (struct lwp_info *) find_inferior_id (&all_lwps, pid);
382 lwp->stop_expected = 0;
383
384 new_inferior = 1;
385
386 return 0;
387 }
388
389 /* Kill the inferior process. Make us have no inferior. */
390
391 static void
392 linux_kill_one_lwp (struct inferior_list_entry *entry)
393 {
394 struct thread_info *thread = (struct thread_info *) entry;
395 struct lwp_info *lwp = get_thread_lwp (thread);
396 int wstat;
397
398 /* We avoid killing the first thread here, because of a Linux kernel (at
399 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
400 the children get a chance to be reaped, it will remain a zombie
401 forever. */
402 if (entry == all_threads.head)
403 return;
404
405 do
406 {
407 ptrace (PTRACE_KILL, pid_of (lwp), 0, 0);
408
409 /* Make sure it died. The loop is most likely unnecessary. */
410 wstat = linux_wait_for_event (thread);
411 } while (WIFSTOPPED (wstat));
412 }
413
414 static void
415 linux_kill (void)
416 {
417 struct thread_info *thread = (struct thread_info *) all_threads.head;
418 struct lwp_info *lwp;
419 int wstat;
420
421 if (thread == NULL)
422 return;
423
424 for_each_inferior (&all_threads, linux_kill_one_lwp);
425
426 /* See the comment in linux_kill_one_lwp. We did not kill the first
427 thread in the list, so do so now. */
428 lwp = get_thread_lwp (thread);
429 do
430 {
431 ptrace (PTRACE_KILL, pid_of (lwp), 0, 0);
432
433 /* Make sure it died. The loop is most likely unnecessary. */
434 wstat = linux_wait_for_event (thread);
435 } while (WIFSTOPPED (wstat));
436
437 clear_inferiors ();
438 free (all_lwps.head);
439 all_lwps.head = all_lwps.tail = NULL;
440 }
441
/* List-iteration callback: detach from the LWP behind ENTRY, first
   making sure it is not left stopped at a stale breakpoint and that
   any SIGSTOP we sent it has been collected.  */

static void
linux_detach_one_lwp (struct inferior_list_entry *entry)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Make sure the process isn't stopped at a breakpoint that's
     no longer there.  */
  check_removed_breakpoint (lwp);

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      /* Resume it first if needed, then swallow the stop event so the
	 detached process doesn't see a spurious SIGSTOP.  */
      if (lwp->stopped)
	linux_resume_one_lwp (&lwp->head, 0, 0, NULL);
      linux_wait_for_event (thread);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, pid_of (lwp), 0, 0);
}
471
472 static int
473 linux_detach (void)
474 {
475 delete_all_breakpoints ();
476 for_each_inferior (&all_threads, linux_detach_one_lwp);
477 clear_inferiors ();
478 free (all_lwps.head);
479 all_lwps.head = all_lwps.tail = NULL;
480 return 0;
481 }
482
/* Block until the inferior process terminates (used when the server
   exits while the inferior keeps running under "join" semantics).
   Loops until waitpid reports an exit or fatal signal, or fails with
   ECHILD (nothing left to wait for).  */

static void
linux_join (void)
{
  extern unsigned long signal_pid;
  int status, ret;

  do {
    ret = waitpid (signal_pid, &status, 0);

    /* Only inspect STATUS when waitpid actually reaped something;
       on failure (ret == -1) STATUS is indeterminate, and the old
       code's WIFEXITED check on it was an uninitialized read.  */
    if (ret > 0 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}
495
496 /* Return nonzero if the given thread is still alive. */
497 static int
498 linux_thread_alive (unsigned long lwpid)
499 {
500 if (find_inferior_id (&all_threads, lwpid) != NULL)
501 return 1;
502 else
503 return 0;
504 }
505
506 /* Return nonzero if this process stopped at a breakpoint which
507 no longer appears to be inserted. Also adjust the PC
508 appropriately to resume where the breakpoint used to be. */
static int
check_removed_breakpoint (struct lwp_info *event_child)
{
  CORE_ADDR stop_pc;
  struct thread_info *saved_inferior;

  /* Nothing to do unless the last stop was attributed to a breakpoint.  */
  if (event_child->pending_is_breakpoint == 0)
    return 0;

  if (debug_threads)
    fprintf (stderr, "Checking for breakpoint in lwp %ld.\n",
	     event_child->lwpid);

  /* get_stop_pc reads through current_inferior, so temporarily switch
     to the thread being examined.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (event_child);

  stop_pc = get_stop_pc ();

  /* If the PC has changed since we stopped, then we shouldn't do
     anything.  This happens if, for instance, GDB handled the
     decr_pc_after_break subtraction itself.  */
  if (stop_pc != event_child->pending_stop_pc)
    {
      if (debug_threads)
	fprintf (stderr, "Ignoring, PC was changed.  Old PC was 0x%08llx\n",
		 event_child->pending_stop_pc);

      event_child->pending_is_breakpoint = 0;
      current_inferior = saved_inferior;
      return 0;
    }

  /* If the breakpoint is still there, we will report hitting it.  */
  if ((*the_low_target.breakpoint_at) (stop_pc))
    {
      if (debug_threads)
	fprintf (stderr, "Ignoring, breakpoint is still present.\n");
      current_inferior = saved_inferior;
      return 0;
    }

  if (debug_threads)
    fprintf (stderr, "Removed breakpoint.\n");

  /* For decr_pc_after_break targets, here is where we perform the
     decrement.  We go immediately from this function to resuming,
     and can not safely call get_stop_pc () again.  */
  if (the_low_target.set_pc != NULL)
    (*the_low_target.set_pc) (stop_pc);

  /* We consumed the pending SIGTRAP.  Clear all the breakpoint-related
     pending state so the event is not reported to GDB.  */
  event_child->pending_is_breakpoint = 0;
  event_child->status_pending_p = 0;
  event_child->status_pending = 0;

  current_inferior = saved_inferior;
  return 1;
}
567
568 /* Return 1 if this lwp has an interesting status pending. This
569 function may silently resume an inferior lwp. */
570 static int
571 status_pending_p (struct inferior_list_entry *entry, void *dummy)
572 {
573 struct lwp_info *lwp = (struct lwp_info *) entry;
574
575 if (lwp->status_pending_p)
576 if (check_removed_breakpoint (lwp))
577 {
578 /* This thread was stopped at a breakpoint, and the breakpoint
579 is now gone. We were told to continue (or step...) all threads,
580 so GDB isn't trying to single-step past this breakpoint.
581 So instead of reporting the old SIGTRAP, pretend we got to
582 the breakpoint just after it was removed instead of just
583 before; resume the process. */
584 linux_resume_one_lwp (&lwp->head, 0, 0, NULL);
585 return 0;
586 }
587
588 return lwp->status_pending_p;
589 }
590
/* Block until a wait status is available from *CHILDP (or from any
   LWP if *CHILDP is NULL); store the status in *WSTATP and point
   *CHILDP at the LWP it came from.  Stops reported for PIDs we don't
   know yet are saved in stopped_pids (for handle_extended_wait) and
   the wait is retried.  Also performs the one-time arch setup the
   first time a new inferior stops.  */

static void
linux_wait_for_lwp (struct lwp_info **childp, int *wstatp)
{
  int ret;
  int to_wait_for = -1;

  if (*childp != NULL)
    to_wait_for = (*childp)->lwpid;

retry:
  while (1)
    {
      /* Poll with WNOHANG, alternating between ordinary children and
	 clone children (__WCLONE is required to see stops from LWPs
	 created with clone), so that neither wait blocks the other.  */
      ret = waitpid (to_wait_for, wstatp, WNOHANG);

      if (ret == -1)
	{
	  if (errno != ECHILD)
	    perror_with_name ("waitpid");
	}
      else if (ret > 0)
	break;

      ret = waitpid (to_wait_for, wstatp, WNOHANG | __WCLONE);

      if (ret == -1)
	{
	  if (errno != ECHILD)
	    perror_with_name ("waitpid (WCLONE)");
	}
      else if (ret > 0)
	break;

      /* Nothing ready on either front; sleep briefly so we don't
	 spin at full speed.  */
      usleep (1000);
    }

  /* Don't log stops for signals 32/33 -- presumably the thread
     library's internal real-time signals (TODO confirm).  */
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  if (to_wait_for == -1)
    *childp = (struct lwp_info *) find_inferior_id (&all_lwps, ret);

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (*childp == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (*childp == NULL)
    goto retry;

  /* Record the stop; any previously pending breakpoint attribution is
     now stale.  */
  (*childp)->stopped = 1;
  (*childp)->pending_is_breakpoint = 0;

  (*childp)->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  if (debug_threads
      && WIFSTOPPED (*wstatp))
    {
      struct thread_info *saved_inferior = current_inferior;
      current_inferior = (struct thread_info *)
	find_inferior_id (&all_threads, (*childp)->lwpid);
      /* For testing only; i386_stop_pc prints out a diagnostic.  */
      if (the_low_target.get_pc != NULL)
	get_stop_pc ();
      current_inferior = saved_inferior;
    }
}
677
/* Wait for an interesting event from CHILD (or from any LWP when
   CHILD is NULL) and return its raw wait status.  Pending statuses
   are consumed first.  Events that are handled internally -- expected
   SIGSTOPs, ignored signals, extended clone events, and gdbserver's
   own breakpoints -- cause the LWP to be resumed without returning.
   On return, current_inferior points at the thread the status belongs
   to.  */

static int
linux_wait_for_event (struct thread_info *child)
{
  CORE_ADDR stop_pc;
  struct lwp_info *event_child;
  int wstat;
  int bp_status;

  /* Check for a process with a pending status.  */
  /* It is possible that the user changed the pending task's registers since
     it stopped.  We correctly handle the change of PC if we hit a breakpoint
     (in check_removed_breakpoint); signals should be reported anyway.  */
  if (child == NULL)
    {
      event_child = (struct lwp_info *)
	find_inferior (&all_lwps, status_pending_p, NULL);
      if (debug_threads && event_child)
	fprintf (stderr, "Got a pending child %ld\n", event_child->lwpid);
    }
  else
    {
      event_child = get_thread_lwp (child);
      /* If the pending status was a stale breakpoint hit,
	 check_removed_breakpoint consumed it; fall through to a real
	 wait.  */
      if (event_child->status_pending_p
	  && check_removed_breakpoint (event_child))
	event_child = NULL;
    }

  if (event_child != NULL)
    {
      if (event_child->status_pending_p)
	{
	  if (debug_threads)
	    fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
		     event_child->lwpid, event_child->status_pending);
	  wstat = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  current_inferior = get_lwp_thread (event_child);
	  return wstat;
	}
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      if (child == NULL)
	event_child = NULL;
      else
	event_child = get_thread_lwp (child);

      linux_wait_for_lwp (&event_child, &wstat);

      if (event_child == NULL)
	error ("event from unknown child");

      current_inferior = (struct thread_info *)
	find_inferior_id (&all_threads, event_child->lwpid);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (wstat))
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld exiting\n", event_child->head.id);

	  /* If the last thread is exiting, just return.  */
	  if (all_threads.head == all_threads.tail)
	    return wstat;

	  dead_thread_notify (thread_id_to_gdb_id (event_child->lwpid));

	  /* Drop the dead LWP from both lists and pick a new current
	     thread.  */
	  remove_inferior (&all_lwps, &event_child->head);
	  free (event_child);
	  remove_thread (current_inferior);
	  current_inferior = (struct thread_info *) all_threads.head;

	  /* If we were waiting for this particular child to do something...
	     well, it did something.  */
	  if (child != NULL)
	    return wstat;

	  /* Wait for a more interesting event.  */
	  continue;
	}

      /* A SIGSTOP we sent ourselves: swallow it and resume the LWP
	 rather than reporting it to GDB.  */
      if (WIFSTOPPED (wstat)
	  && WSTOPSIG (wstat) == SIGSTOP
	  && event_child->stop_expected)
	{
	  if (debug_threads)
	    fprintf (stderr, "Expected stop.\n");
	  event_child->stop_expected = 0;
	  linux_resume_one_lwp (&event_child->head,
				event_child->stepping, 0, NULL);
	  continue;
	}

      /* A SIGTRAP with event bits in the upper half is a ptrace
	 extended event (e.g. a clone notification).  */
      if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
	  && wstat >> 16 != 0)
	{
	  handle_extended_wait (event_child, wstat);
	  continue;
	}

      /* If GDB is not interested in this signal, don't stop other
	 threads, and don't report it to GDB.  Just resume the
	 inferior right away.  We do this for threading-related
	 signals as well as any that GDB specifically requested we
	 ignore.  But never ignore SIGSTOP if we sent it ourselves,
	 and do not ignore signals when stepping - they may require
	 special handling to skip the signal handler.  */
      /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
	 thread library?  */
      if (WIFSTOPPED (wstat)
	  && !event_child->stepping
	  && (
#ifdef USE_THREAD_DB
	      (thread_db_active && (WSTOPSIG (wstat) == __SIGRTMIN
				    || WSTOPSIG (wstat) == __SIGRTMIN + 1))
	      ||
#endif
	      (pass_signals[target_signal_from_host (WSTOPSIG (wstat))]
	       && (WSTOPSIG (wstat) != SIGSTOP || !stopping_threads))))
	{
	  siginfo_t info, *info_p;

	  if (debug_threads)
	    fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
		     WSTOPSIG (wstat), event_child->head.id);

	  /* Preserve the original siginfo when re-delivering the
	     signal, if the kernel lets us fetch it.  */
	  if (ptrace (PTRACE_GETSIGINFO, event_child->lwpid, 0, &info) == 0)
	    info_p = &info;
	  else
	    info_p = NULL;
	  linux_resume_one_lwp (&event_child->head,
				event_child->stepping,
				WSTOPSIG (wstat), info_p);
	  continue;
	}

      /* If this event was not handled above, and is not a SIGTRAP, report
	 it.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGTRAP)
	return wstat;

      /* If this target does not support breakpoints, we simply report the
	 SIGTRAP; it's of no concern to us.  */
      if (the_low_target.get_pc == NULL)
	return wstat;

      stop_pc = get_stop_pc ();

      /* bp_reinsert will only be set if we were single-stepping.
	 Notice that we will resume the process after hitting
	 a gdbserver breakpoint; single-stepping to/over one
	 is not supported (yet).  */
      if (event_child->bp_reinsert != 0)
	{
	  if (debug_threads)
	    fprintf (stderr, "Reinserted breakpoint.\n");
	  reinsert_breakpoint (event_child->bp_reinsert);
	  event_child->bp_reinsert = 0;

	  /* Clear the single-stepping flag and SIGTRAP as we resume.  */
	  linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
	  continue;
	}

      bp_status = check_breakpoints (stop_pc);

      if (bp_status != 0)
	{
	  if (debug_threads)
	    fprintf (stderr, "Hit a gdbserver breakpoint.\n");

	  /* We hit one of our own breakpoints.  We mark it as a pending
	     breakpoint, so that check_removed_breakpoint () will do the PC
	     adjustment for us at the appropriate time.  */
	  event_child->pending_is_breakpoint = 1;
	  event_child->pending_stop_pc = stop_pc;

	  /* We may need to put the breakpoint back.  We continue in the event
	     loop instead of simply replacing the breakpoint right away,
	     in order to not lose signals sent to the thread that hit the
	     breakpoint.  Unfortunately this increases the window where another
	     thread could sneak past the removed breakpoint.  For the current
	     use of server-side breakpoints (thread creation) this is
	     acceptable; but it needs to be considered before this breakpoint
	     mechanism can be used in more general ways.  For some breakpoints
	     it may be necessary to stop all other threads, but that should
	     be avoided where possible.

	     If breakpoint_reinsert_addr is NULL, that means that we can
	     use PTRACE_SINGLESTEP on this platform.  Uninsert the breakpoint,
	     mark it for reinsertion, and single-step.

	     Otherwise, call the target function to figure out where we need
	     our temporary breakpoint, create it, and continue executing this
	     process.  */
	  if (bp_status == 2)
	    /* No need to reinsert.  */
	    linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
	  else if (the_low_target.breakpoint_reinsert_addr == NULL)
	    {
	      event_child->bp_reinsert = stop_pc;
	      uninsert_breakpoint (stop_pc);
	      linux_resume_one_lwp (&event_child->head, 1, 0, NULL);
	    }
	  else
	    {
	      reinsert_breakpoint_by_bp
		(stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
	      linux_resume_one_lwp (&event_child->head, 0, 0, NULL);
	    }

	  continue;
	}

      if (debug_threads)
	fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");

      /* If we were single-stepping, we definitely want to report the
	 SIGTRAP.  The single-step operation has completed, so also
	 clear the stepping flag; in general this does not matter,
	 because the SIGTRAP will be reported to the client, which
	 will give us a new action for this thread, but clear it for
	 consistency anyway.  It's safe to clear the stepping flag
	 because the only consumer of get_stop_pc () after this point
	 is check_removed_breakpoint, and pending_is_breakpoint is not
	 set.  It might be wiser to use a step_completed flag instead.  */
      if (event_child->stepping)
	{
	  event_child->stepping = 0;
	  return wstat;
	}

      /* A SIGTRAP that we can't explain.  It may have been a breakpoint.
	 Check if it is a breakpoint, and if so mark the process information
	 accordingly.  This will handle both the necessary fiddling with the
	 PC on decr_pc_after_break targets and suppressing extra threads
	 hitting a breakpoint if two hit it at once and then GDB removes it
	 after the first is reported.  Arguably it would be better to report
	 multiple threads hitting breakpoints simultaneously, but the current
	 remote protocol does not allow this.  */
      if ((*the_low_target.breakpoint_at) (stop_pc))
	{
	  event_child->pending_is_breakpoint = 1;
	  event_child->pending_stop_pc = stop_pc;
	}

      return wstat;
    }

  /* NOTREACHED */
  return 0;
}
936
937 /* Wait for process, returns status. */
938
static unsigned char
linux_wait (char *status)
{
  int w;
  struct thread_info *child = NULL;

retry:
  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (cont_thread != 0 && cont_thread != -1)
    {
      child = (struct thread_info *) find_inferior_id (&all_threads,
						       cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (child == NULL)
	{
	  struct thread_resume resume_info;
	  resume_info.thread = -1;
	  resume_info.step = resume_info.sig = resume_info.leave_stopped = 0;
	  linux_resume (&resume_info);
	}
    }

  w = linux_wait_for_event (child);
  stop_all_lwps ();

  /* Deferred from process creation: enable clone-event reporting the
     first time the new inferior is known to be stopped.  */
  if (must_set_ptrace_flags)
    {
      ptrace (PTRACE_SETOPTIONS, inferior_pid, 0, PTRACE_O_TRACECLONE);
      must_set_ptrace_flags = 0;
    }

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (all_threads.head == all_threads.tail)
    {
      if (WIFEXITED (w))
	{
	  /* Normal exit: report 'W' plus the exit code.  */
	  fprintf (stderr, "\nChild exited with retcode = %x \n",
		   WEXITSTATUS (w));
	  *status = 'W';
	  clear_inferiors ();
	  free (all_lwps.head);
	  all_lwps.head = all_lwps.tail = NULL;
	  return WEXITSTATUS (w);
	}
      else if (!WIFSTOPPED (w))
	{
	  /* Killed by a signal: report 'X' plus the signal number.  */
	  fprintf (stderr, "\nChild terminated with signal = %x \n",
		   WTERMSIG (w));
	  *status = 'X';
	  clear_inferiors ();
	  free (all_lwps.head);
	  all_lwps.head = all_lwps.tail = NULL;
	  return target_signal_from_host (WTERMSIG (w));
	}
    }
  else
    {
      /* A non-final thread exited; wait for something reportable.  */
      if (!WIFSTOPPED (w))
	goto retry;
    }

  /* The inferior stopped with a signal: report 'T'.  */
  *status = 'T';
  return target_signal_from_host (WSTOPSIG (w));
}
1020
1021 /* Send a signal to an LWP. For LinuxThreads, kill is enough; however, if
1022 thread groups are in use, we need to use tkill. */
1023
static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Remembers whether the tkill syscall was found missing, so we only
     probe for it once.  */
  static int tkill_failed;

  errno = 0;

#ifdef SYS_tkill
  if (!tkill_failed)
    {
      int result = syscall (SYS_tkill, lwpid, signo);

      /* Anything other than ENOSYS means the syscall exists; hand its
	 result back.  ENOSYS means an old kernel without tkill: fall
	 back to kill permanently.  */
      if (errno != ENOSYS)
	return result;

      errno = 0;
      tkill_failed = 1;
    }
#endif

  return kill (lwpid, signo);
}
1044
1045 static void
1046 send_sigstop (struct inferior_list_entry *entry)
1047 {
1048 struct lwp_info *lwp = (struct lwp_info *) entry;
1049
1050 if (lwp->stopped)
1051 return;
1052
1053 /* If we already have a pending stop signal for this process, don't
1054 send another. */
1055 if (lwp->stop_expected)
1056 {
1057 if (debug_threads)
1058 fprintf (stderr, "Have pending sigstop for lwp %ld\n",
1059 lwp->lwpid);
1060
1061 /* We clear the stop_expected flag so that wait_for_sigstop
1062 will receive the SIGSTOP event (instead of silently resuming and
1063 waiting again). It'll be reset below. */
1064 lwp->stop_expected = 0;
1065 return;
1066 }
1067
1068 if (debug_threads)
1069 fprintf (stderr, "Sending sigstop to lwp %ld\n", lwp->head.id);
1070
1071 kill_lwp (lwp->head.id, SIGSTOP);
1072 }
1073
/* Wait until the LWP behind ENTRY reports a stop.  If it stops with a
   signal other than SIGSTOP, stash that status to be reported later
   and remember that a SIGSTOP is still due.  Restores (or, if the
   previously current thread died, replaces) current_inferior, which
   linux_wait_for_event may have changed.  */
static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior, *thread;
  int wstat;
  unsigned long saved_tid;

  if (lwp->stopped)
    return;

  /* Remember the current thread by id, not pointer: if it dies during
     the wait, the pointer may no longer be valid.  */
  saved_inferior = current_inferior;
  saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  thread = (struct thread_info *) find_inferior_id (&all_threads,
						    lwp->lwpid);
  wstat = linux_wait_for_event (thread);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP.  If the process exited, just
     return.  */
  if (WIFSTOPPED (wstat)
      && WSTOPSIG (wstat) != SIGSTOP)
    {
      if (debug_threads)
	fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
		 lwp->lwpid, wstat);
      /* Report this signal when the LWP is next resumed/reported, and
	 note that the SIGSTOP we sent is still pending.  */
      lwp->status_pending_p = 1;
      lwp->status_pending = wstat;
      lwp->stop_expected = 1;
    }

  if (linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
	fprintf (stderr, "Previously current thread died.\n");

      /* Set a valid thread as current.  */
      set_desired_inferior (0);
    }
}
1116
/* Stop every LWP: first send each a SIGSTOP, then wait until each has
   actually reported a stop.  stopping_threads is set for the duration
   so event-processing code can distinguish these requested stops from
   spontaneous ones.  */
static void
stop_all_lwps (void)
{
  stopping_threads = 1;
  for_each_inferior (&all_lwps, send_sigstop);
  for_each_inferior (&all_lwps, wait_for_sigstop);
  stopping_threads = 0;
}
1125
/* Resume execution of the LWP behind ENTRY.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal, with siginfo INFO (which
   may be NULL for "no particular siginfo").  Signals that cannot be
   delivered immediately are queued on the LWP and delivered on a
   later resume.  */

static void
linux_resume_one_lwp (struct inferior_list_entry *entry,
		      int step, int signal, siginfo_t *info)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;

  /* Only a stopped LWP can be resumed.  */
  if (lwp->stopped == 0)
    return;

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (lwp->status_pending_p || lwp->pending_signals != NULL
	  || lwp->bp_reinsert != 0))
    {
      struct pending_signals *p_sig;
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
	memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
	memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  /* If a status is still pending after re-checking the breakpoint,
     keep the LWP stopped so the status can be reported.  */
  if (lwp->status_pending_p && !check_removed_breakpoint (lwp))
    return;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
	     inferior_pid, step ? "step" : "continue", signal,
	     lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	fprintf (stderr, "  pending reinsert at %08lx", (long)lwp->bp_reinsert);
      if (step == 0)
	fprintf (stderr, "BAD - reinserting but not stepping.\n");
      /* Force a single-step so the breakpoint can be reinserted right
	 after the original instruction executes.  */
      step = 1;

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  check_removed_breakpoint (lwp);

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      fprintf (stderr, "  ");
      (*the_low_target.get_pc) ();
    }

  /* If we have pending signals, consume one unless we are trying to reinsert
     a breakpoint.  The list is newest-first, so walk to the tail to
     deliver the oldest queued signal.  */
  if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwp->lwpid, 0, &(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  /* Registers may change under us once the LWP runs; drop any cached
     copies.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));
  errno = 0;
  lwp->stopped = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwp->lwpid, 0, signal);

  current_inferior = saved_inferior;
  if (errno)
    {
      /* ESRCH from ptrace either means that the thread was already
	 running (an error) or that it is gone (a race condition).  If
	 it's gone, we will get a notification the next time we wait,
	 so we can ignore the error.  We could differentiate these
	 two, but it's tricky without waiting; the thread still exists
	 as a zombie, so sending it signal 0 would succeed.  So just
	 ignore ESRCH.  */
      if (errno == ESRCH)
	return;

      perror_with_name ("ptrace");
    }
}
1240
1241 static struct thread_resume *resume_ptr;
1242
1243 /* This function is called once per thread. We look up the thread
1244 in RESUME_PTR, and mark the thread with a pointer to the appropriate
1245 resume request.
1246
1247 This algorithm is O(threads * resume elements), but resume elements
1248 is small (and will remain small at least until GDB supports thread
1249 suspension). */
1250 static void
1251 linux_set_resume_request (struct inferior_list_entry *entry)
1252 {
1253 struct lwp_info *lwp;
1254 struct thread_info *thread;
1255 int ndx;
1256
1257 thread = (struct thread_info *) entry;
1258 lwp = get_thread_lwp (thread);
1259
1260 ndx = 0;
1261 while (resume_ptr[ndx].thread != -1 && resume_ptr[ndx].thread != entry->id)
1262 ndx++;
1263
1264 lwp->resume = &resume_ptr[ndx];
1265 }
1266
1267 /* This function is called once per thread. We check the thread's resume
1268 request, which will tell us whether to resume, step, or leave the thread
1269 stopped; and what signal, if any, it should be sent. For threads which
1270 we aren't explicitly told otherwise, we preserve the stepping flag; this
1271 is used for stepping over gdbserver-placed breakpoints. */
1272
1273 static void
1274 linux_continue_one_thread (struct inferior_list_entry *entry)
1275 {
1276 struct lwp_info *lwp;
1277 struct thread_info *thread;
1278 int step;
1279
1280 thread = (struct thread_info *) entry;
1281 lwp = get_thread_lwp (thread);
1282
1283 if (lwp->resume->leave_stopped)
1284 return;
1285
1286 if (lwp->resume->thread == -1)
1287 step = lwp->stepping || lwp->resume->step;
1288 else
1289 step = lwp->resume->step;
1290
1291 linux_resume_one_lwp (&lwp->head, step, lwp->resume->sig, NULL);
1292
1293 lwp->resume = NULL;
1294 }
1295
1296 /* This function is called once per thread. We check the thread's resume
1297 request, which will tell us whether to resume, step, or leave the thread
1298 stopped; and what signal, if any, it should be sent. We queue any needed
1299 signals, since we won't actually resume. We already have a pending event
1300 to report, so we don't need to preserve any step requests; they should
1301 be re-issued if necessary. */
1302
1303 static void
1304 linux_queue_one_thread (struct inferior_list_entry *entry)
1305 {
1306 struct lwp_info *lwp;
1307 struct thread_info *thread;
1308
1309 thread = (struct thread_info *) entry;
1310 lwp = get_thread_lwp (thread);
1311
1312 if (lwp->resume->leave_stopped)
1313 return;
1314
1315 /* If we have a new signal, enqueue the signal. */
1316 if (lwp->resume->sig != 0)
1317 {
1318 struct pending_signals *p_sig;
1319 p_sig = xmalloc (sizeof (*p_sig));
1320 p_sig->prev = lwp->pending_signals;
1321 p_sig->signal = lwp->resume->sig;
1322 memset (&p_sig->info, 0, sizeof (siginfo_t));
1323
1324 /* If this is the same signal we were previously stopped by,
1325 make sure to queue its siginfo. We can ignore the return
1326 value of ptrace; if it fails, we'll skip
1327 PTRACE_SETSIGINFO. */
1328 if (WIFSTOPPED (lwp->last_status)
1329 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
1330 ptrace (PTRACE_GETSIGINFO, lwp->lwpid, 0, &p_sig->info);
1331
1332 lwp->pending_signals = p_sig;
1333 }
1334
1335 lwp->resume = NULL;
1336 }
1337
1338 /* Set DUMMY if this process has an interesting status pending. */
1339 static int
1340 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1341 {
1342 struct lwp_info *lwp = (struct lwp_info *) entry;
1343
1344 /* Processes which will not be resumed are not interesting, because
1345 we might not wait for them next time through linux_wait. */
1346 if (lwp->resume->leave_stopped)
1347 return 0;
1348
1349 /* If this thread has a removed breakpoint, we won't have any
1350 events to report later, so check now. check_removed_breakpoint
1351 may clear status_pending_p. We avoid calling check_removed_breakpoint
1352 for any thread that we are not otherwise going to resume - this
1353 lets us preserve stopped status when two threads hit a breakpoint.
1354 GDB removes the breakpoint to single-step a particular thread
1355 past it, then re-inserts it and resumes all threads. We want
1356 to report the second thread without resuming it in the interim. */
1357 if (lwp->status_pending_p)
1358 check_removed_breakpoint (lwp);
1359
1360 if (lwp->status_pending_p)
1361 * (int *) flag_p = 1;
1362
1363 return 0;
1364 }
1365
1366 static void
1367 linux_resume (struct thread_resume *resume_info)
1368 {
1369 int pending_flag;
1370
1371 /* Yes, the use of a global here is rather ugly. */
1372 resume_ptr = resume_info;
1373
1374 for_each_inferior (&all_threads, linux_set_resume_request);
1375
1376 /* If there is a thread which would otherwise be resumed, which
1377 has a pending status, then don't resume any threads - we can just
1378 report the pending status. Make sure to queue any signals
1379 that would otherwise be sent. */
1380 pending_flag = 0;
1381 find_inferior (&all_lwps, resume_status_pending_p, &pending_flag);
1382
1383 if (debug_threads)
1384 {
1385 if (pending_flag)
1386 fprintf (stderr, "Not resuming, pending status\n");
1387 else
1388 fprintf (stderr, "Resuming, no pending status\n");
1389 }
1390
1391 if (pending_flag)
1392 for_each_inferior (&all_threads, linux_queue_one_thread);
1393 else
1394 for_each_inferior (&all_threads, linux_continue_one_thread);
1395 }
1396
1397 #ifdef HAVE_LINUX_USRREGS
1398
1399 int
1400 register_addr (int regnum)
1401 {
1402 int addr;
1403
1404 if (regnum < 0 || regnum >= the_low_target.num_regs)
1405 error ("Invalid register number %d.", regnum);
1406
1407 addr = the_low_target.regmap[regnum];
1408
1409 return addr;
1410 }
1411
1412 /* Fetch one register. */
1413 static void
1414 fetch_register (int regno)
1415 {
1416 CORE_ADDR regaddr;
1417 int i, size;
1418 char *buf;
1419
1420 if (regno >= the_low_target.num_regs)
1421 return;
1422 if ((*the_low_target.cannot_fetch_register) (regno))
1423 return;
1424
1425 regaddr = register_addr (regno);
1426 if (regaddr == -1)
1427 return;
1428 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1429 & - sizeof (PTRACE_XFER_TYPE));
1430 buf = alloca (size);
1431 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1432 {
1433 errno = 0;
1434 *(PTRACE_XFER_TYPE *) (buf + i) =
1435 ptrace (PTRACE_PEEKUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr, 0);
1436 regaddr += sizeof (PTRACE_XFER_TYPE);
1437 if (errno != 0)
1438 {
1439 /* Warning, not error, in case we are attached; sometimes the
1440 kernel doesn't let us at the registers. */
1441 char *err = strerror (errno);
1442 char *msg = alloca (strlen (err) + 128);
1443 sprintf (msg, "reading register %d: %s", regno, err);
1444 error (msg);
1445 goto error_exit;
1446 }
1447 }
1448
1449 if (the_low_target.supply_ptrace_register)
1450 the_low_target.supply_ptrace_register (regno, buf);
1451 else
1452 supply_register (regno, buf);
1453
1454 error_exit:;
1455 }
1456
1457 /* Fetch all registers, or just one, from the child process. */
1458 static void
1459 usr_fetch_inferior_registers (int regno)
1460 {
1461 if (regno == -1 || regno == 0)
1462 for (regno = 0; regno < the_low_target.num_regs; regno++)
1463 fetch_register (regno);
1464 else
1465 fetch_register (regno);
1466 }
1467
1468 /* Store our register values back into the inferior.
1469 If REGNO is -1, do this for all registers.
1470 Otherwise, REGNO specifies which register (so we can save time). */
1471 static void
1472 usr_store_inferior_registers (int regno)
1473 {
1474 CORE_ADDR regaddr;
1475 int i, size;
1476 char *buf;
1477
1478 if (regno >= 0)
1479 {
1480 if (regno >= the_low_target.num_regs)
1481 return;
1482
1483 if ((*the_low_target.cannot_store_register) (regno) == 1)
1484 return;
1485
1486 regaddr = register_addr (regno);
1487 if (regaddr == -1)
1488 return;
1489 errno = 0;
1490 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
1491 & - sizeof (PTRACE_XFER_TYPE);
1492 buf = alloca (size);
1493 memset (buf, 0, size);
1494
1495 if (the_low_target.collect_ptrace_register)
1496 the_low_target.collect_ptrace_register (regno, buf);
1497 else
1498 collect_register (regno, buf);
1499
1500 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
1501 {
1502 errno = 0;
1503 ptrace (PTRACE_POKEUSER, inferior_pid, (PTRACE_ARG3_TYPE) regaddr,
1504 *(PTRACE_XFER_TYPE *) (buf + i));
1505 if (errno != 0)
1506 {
1507 /* At this point, ESRCH should mean the process is
1508 already gone, in which case we simply ignore attempts
1509 to change its registers. See also the related
1510 comment in linux_resume_one_lwp. */
1511 if (errno == ESRCH)
1512 return;
1513
1514 if ((*the_low_target.cannot_store_register) (regno) == 0)
1515 {
1516 char *err = strerror (errno);
1517 char *msg = alloca (strlen (err) + 128);
1518 sprintf (msg, "writing register %d: %s",
1519 regno, err);
1520 error (msg);
1521 return;
1522 }
1523 }
1524 regaddr += sizeof (PTRACE_XFER_TYPE);
1525 }
1526 }
1527 else
1528 for (regno = 0; regno < the_low_target.num_regs; regno++)
1529 usr_store_inferior_registers (regno);
1530 }
1531 #endif /* HAVE_LINUX_USRREGS */
1532
1533
1534
1535 #ifdef HAVE_LINUX_REGSETS
1536
1537 static int
1538 regsets_fetch_inferior_registers ()
1539 {
1540 struct regset_info *regset;
1541 int saw_general_regs = 0;
1542
1543 regset = target_regsets;
1544
1545 while (regset->size >= 0)
1546 {
1547 void *buf;
1548 int res;
1549
1550 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
1551 {
1552 regset ++;
1553 continue;
1554 }
1555
1556 buf = xmalloc (regset->size);
1557 #ifndef __sparc__
1558 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1559 #else
1560 res = ptrace (regset->get_request, inferior_pid, buf, 0);
1561 #endif
1562 if (res < 0)
1563 {
1564 if (errno == EIO)
1565 {
1566 /* If we get EIO on a regset, do not try it again for
1567 this process. */
1568 disabled_regsets[regset - target_regsets] = 1;
1569 continue;
1570 }
1571 else
1572 {
1573 char s[256];
1574 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%ld",
1575 inferior_pid);
1576 perror (s);
1577 }
1578 }
1579 else if (regset->type == GENERAL_REGS)
1580 saw_general_regs = 1;
1581 regset->store_function (buf);
1582 regset ++;
1583 }
1584 if (saw_general_regs)
1585 return 0;
1586 else
1587 return 1;
1588 }
1589
1590 static int
1591 regsets_store_inferior_registers ()
1592 {
1593 struct regset_info *regset;
1594 int saw_general_regs = 0;
1595
1596 regset = target_regsets;
1597
1598 while (regset->size >= 0)
1599 {
1600 void *buf;
1601 int res;
1602
1603 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
1604 {
1605 regset ++;
1606 continue;
1607 }
1608
1609 buf = xmalloc (regset->size);
1610
1611 /* First fill the buffer with the current register set contents,
1612 in case there are any items in the kernel's regset that are
1613 not in gdbserver's regcache. */
1614 #ifndef __sparc__
1615 res = ptrace (regset->get_request, inferior_pid, 0, buf);
1616 #else
1617 res = ptrace (regset->get_request, inferior_pid, buf, 0);
1618 #endif
1619
1620 if (res == 0)
1621 {
1622 /* Then overlay our cached registers on that. */
1623 regset->fill_function (buf);
1624
1625 /* Only now do we write the register set. */
1626 #ifndef __sparc__
1627 res = ptrace (regset->set_request, inferior_pid, 0, buf);
1628 #else
1629 res = ptrace (regset->set_request, inferior_pid, buf, 0);
1630 #endif
1631 }
1632
1633 if (res < 0)
1634 {
1635 if (errno == EIO)
1636 {
1637 /* If we get EIO on a regset, do not try it again for
1638 this process. */
1639 disabled_regsets[regset - target_regsets] = 1;
1640 continue;
1641 }
1642 else if (errno == ESRCH)
1643 {
1644 /* At this point, ESRCH should mean the process is
1645 already gone, in which case we simply ignore attempts
1646 to change its registers. See also the related
1647 comment in linux_resume_one_lwp. */
1648 return 0;
1649 }
1650 else
1651 {
1652 perror ("Warning: ptrace(regsets_store_inferior_registers)");
1653 }
1654 }
1655 else if (regset->type == GENERAL_REGS)
1656 saw_general_regs = 1;
1657 regset ++;
1658 free (buf);
1659 }
1660 if (saw_general_regs)
1661 return 0;
1662 else
1663 return 1;
1664 return 0;
1665 }
1666
1667 #endif /* HAVE_LINUX_REGSETS */
1668
1669
/* Fetch registers into the regcache.  Prefer the regset interface
   when it covers the general registers; fall back to per-register
   PTRACE_PEEKUSER otherwise.  */
void
linux_fetch_registers (int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_fetch_inferior_registers () == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regno);
#endif
}
1681
/* Write registers from the regcache back to the inferior.  Prefer the
   regset interface when it covers the general registers; fall back to
   per-register PTRACE_POKEUSER otherwise.  */
void
linux_store_registers (int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_store_inferior_registers () == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regno);
#endif
}
1693
1694
1695 /* Copy LEN bytes from inferior's memory starting at MEMADDR
1696 to debugger memory starting at MYADDR. */
1697
1698 static int
1699 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
1700 {
1701 register int i;
1702 /* Round starting address down to longword boundary. */
1703 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
1704 /* Round ending address up; get number of longwords that makes. */
1705 register int count
1706 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
1707 / sizeof (PTRACE_XFER_TYPE);
1708 /* Allocate buffer of that many longwords. */
1709 register PTRACE_XFER_TYPE *buffer
1710 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
1711 int fd;
1712 char filename[64];
1713
1714 /* Try using /proc. Don't bother for one word. */
1715 if (len >= 3 * sizeof (long))
1716 {
1717 /* We could keep this file open and cache it - possibly one per
1718 thread. That requires some juggling, but is even faster. */
1719 sprintf (filename, "/proc/%ld/mem", inferior_pid);
1720 fd = open (filename, O_RDONLY | O_LARGEFILE);
1721 if (fd == -1)
1722 goto no_proc;
1723
1724 /* If pread64 is available, use it. It's faster if the kernel
1725 supports it (only one syscall), and it's 64-bit safe even on
1726 32-bit platforms (for instance, SPARC debugging a SPARC64
1727 application). */
1728 #ifdef HAVE_PREAD64
1729 if (pread64 (fd, myaddr, len, memaddr) != len)
1730 #else
1731 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, memaddr, len) != len)
1732 #endif
1733 {
1734 close (fd);
1735 goto no_proc;
1736 }
1737
1738 close (fd);
1739 return 0;
1740 }
1741
1742 no_proc:
1743 /* Read all the longwords */
1744 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
1745 {
1746 errno = 0;
1747 buffer[i] = ptrace (PTRACE_PEEKTEXT, inferior_pid,
1748 (PTRACE_ARG3_TYPE) addr, 0);
1749 if (errno)
1750 return errno;
1751 }
1752
1753 /* Copy appropriate bytes out of the buffer. */
1754 memcpy (myaddr,
1755 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
1756 len);
1757
1758 return 0;
1759 }
1760
/* Copy LEN bytes of data from debugger memory at MYADDR
   to inferior's memory at MEMADDR.
   On failure (cannot write the inferior)
   returns the value of errno.  */

static int
linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));

  if (debug_threads)
    {
      fprintf (stderr, "Writing %02x to %08lx\n", (unsigned)myaddr[0], (long)memaddr);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.  */

  /* NOTE(review): errno is not checked after these two PEEKTEXT reads;
     if they fail, the padding words written back below contain the
     ptrace error value.  The POKETEXT loop below does check errno, so
     an unreadable/unwritable range is still reported — confirm this is
     acceptable before changing.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, inferior_pid,
		      (PTRACE_ARG3_TYPE) addr, 0);

  if (count > 1)
    {
      buffer[count - 1]
	= ptrace (PTRACE_PEEKTEXT, inferior_pid,
		  (PTRACE_ARG3_TYPE) (addr + (count - 1)
				      * sizeof (PTRACE_XFER_TYPE)),
		  0);
    }

  /* Copy data to be written over corresponding part of buffer */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, inferior_pid, (PTRACE_ARG3_TYPE) addr, buffer[i]);
      if (errno)
	return errno;
    }

  return 0;
}
1813
1814 static int linux_supports_tracefork_flag;
1815
/* Helper functions for linux_test_for_tracefork, called via clone ().  */

/* Grandchild: exits immediately.  Its only purpose is to be created,
   so the tracer can see whether a fork event is reported.  */
static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}
1823
/* Stack size for the clone'd helper children below.  */
#define STACK_SIZE 4096

/* Child helper: arrange to be traced, stop, then clone a grandchild
   so the tracer can observe whether a PTRACE_EVENT_FORK is
   delivered.  ARG is the base of a stack region for the grandchild;
   on non-ia64 targets the stack grows down from arg + STACK_SIZE.  */
static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  clone (linux_tracefork_grandchild, arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif
  _exit (0);
}
1840
/* Wrapper function for waitpid which handles EINTR.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  for (;;)
    {
      int ret = waitpid (pid, status, flags);

      if (ret != -1 || errno != EINTR)
	return ret;
    }
}
1855
1856 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
1857 sure that we can enable the option, and that it had the desired
1858 effect. */
1859
1860 static void
1861 linux_test_for_tracefork (void)
1862 {
1863 int child_pid, ret, status;
1864 long second_pid;
1865 char *stack = xmalloc (STACK_SIZE * 4);
1866
1867 linux_supports_tracefork_flag = 0;
1868
1869 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
1870 #ifdef __ia64__
1871 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
1872 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
1873 #else
1874 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
1875 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
1876 #endif
1877 if (child_pid == -1)
1878 perror_with_name ("clone");
1879
1880 ret = my_waitpid (child_pid, &status, 0);
1881 if (ret == -1)
1882 perror_with_name ("waitpid");
1883 else if (ret != child_pid)
1884 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
1885 if (! WIFSTOPPED (status))
1886 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
1887
1888 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
1889 if (ret != 0)
1890 {
1891 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
1892 if (ret != 0)
1893 {
1894 warning ("linux_test_for_tracefork: failed to kill child");
1895 return;
1896 }
1897
1898 ret = my_waitpid (child_pid, &status, 0);
1899 if (ret != child_pid)
1900 warning ("linux_test_for_tracefork: failed to wait for killed child");
1901 else if (!WIFSIGNALED (status))
1902 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
1903 "killed child", status);
1904
1905 return;
1906 }
1907
1908 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
1909 if (ret != 0)
1910 warning ("linux_test_for_tracefork: failed to resume child");
1911
1912 ret = my_waitpid (child_pid, &status, 0);
1913
1914 if (ret == child_pid && WIFSTOPPED (status)
1915 && status >> 16 == PTRACE_EVENT_FORK)
1916 {
1917 second_pid = 0;
1918 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
1919 if (ret == 0 && second_pid != 0)
1920 {
1921 int second_status;
1922
1923 linux_supports_tracefork_flag = 1;
1924 my_waitpid (second_pid, &second_status, 0);
1925 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
1926 if (ret != 0)
1927 warning ("linux_test_for_tracefork: failed to kill second child");
1928 my_waitpid (second_pid, &status, 0);
1929 }
1930 }
1931 else
1932 warning ("linux_test_for_tracefork: unexpected result from waitpid "
1933 "(%d, status 0x%x)", ret, status);
1934
1935 do
1936 {
1937 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
1938 if (ret != 0)
1939 warning ("linux_test_for_tracefork: failed to kill child");
1940 my_waitpid (child_pid, &status, 0);
1941 }
1942 while (WIFSTOPPED (status));
1943
1944 free (stack);
1945 }
1946
1947
/* Target hook called when symbols become available.  For Linux this
   initializes libthread_db-based thread support, once.  */
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  if (thread_db_active)
    return;

  /* thread_db_init is passed whether tracefork is NOT supported —
     presumably to select a fallback event mechanism; confirm against
     thread_db_init's contract.  */
  thread_db_active = thread_db_init (!linux_supports_tracefork_flag);
#endif
}
1958
1959 static void
1960 linux_request_interrupt (void)
1961 {
1962 extern unsigned long signal_pid;
1963
1964 if (cont_thread != 0 && cont_thread != -1)
1965 {
1966 struct lwp_info *lwp;
1967
1968 lwp = get_thread_lwp (current_inferior);
1969 kill_lwp (lwp->lwpid, SIGINT);
1970 }
1971 else
1972 kill_lwp (signal_pid, SIGINT);
1973 }
1974
1975 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
1976 to debugger memory starting at MYADDR. */
1977
1978 static int
1979 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
1980 {
1981 char filename[PATH_MAX];
1982 int fd, n;
1983
1984 snprintf (filename, sizeof filename, "/proc/%ld/auxv", inferior_pid);
1985
1986 fd = open (filename, O_RDONLY);
1987 if (fd < 0)
1988 return -1;
1989
1990 if (offset != (CORE_ADDR) 0
1991 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
1992 n = -1;
1993 else
1994 n = read (fd, myaddr, len);
1995
1996 close (fd);
1997
1998 return n;
1999 }
2000
2001 /* These watchpoint related wrapper functions simply pass on the function call
2002 if the target has registered a corresponding function. */
2003
2004 static int
2005 linux_insert_watchpoint (char type, CORE_ADDR addr, int len)
2006 {
2007 if (the_low_target.insert_watchpoint != NULL)
2008 return the_low_target.insert_watchpoint (type, addr, len);
2009 else
2010 /* Unsupported (see target.h). */
2011 return 1;
2012 }
2013
2014 static int
2015 linux_remove_watchpoint (char type, CORE_ADDR addr, int len)
2016 {
2017 if (the_low_target.remove_watchpoint != NULL)
2018 return the_low_target.remove_watchpoint (type, addr, len);
2019 else
2020 /* Unsupported (see target.h). */
2021 return 1;
2022 }
2023
2024 static int
2025 linux_stopped_by_watchpoint (void)
2026 {
2027 if (the_low_target.stopped_by_watchpoint != NULL)
2028 return the_low_target.stopped_by_watchpoint ();
2029 else
2030 return 0;
2031 }
2032
2033 static CORE_ADDR
2034 linux_stopped_data_address (void)
2035 {
2036 if (the_low_target.stopped_data_address != NULL)
2037 return the_low_target.stopped_data_address ();
2038 else
2039 return 0;
2040 }
2041
2042 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
2043 #if defined(__mcoldfire__)
2044 /* These should really be defined in the kernel's ptrace.h header. */
2045 #define PT_TEXT_ADDR 49*4
2046 #define PT_DATA_ADDR 50*4
2047 #define PT_TEXT_END_ADDR 51*4
2048 #endif
2049
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Reads the runtime text/data addresses out of the
   kernel's user area via PTRACE_PEEKUSER.  Returns 1 and fills in
   *TEXT_P and *DATA_P on success, 0 otherwise.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = get_thread_lwp (current_inferior)->head.id;

  /* errno distinguishes a real -1 peek value from a ptrace failure.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
 return 0;
}
2085 #endif
2086
/* Handle the qXfer:osdata read object.  Only the "processes" annex is
   supported: on the first read (OFFSET == 0) a snapshot of /proc is
   built as an XML <osdata> document in a static buffer; subsequent
   reads page through it.  Returns the number of bytes copied into
   READBUF, or 0 when the annex is unsupported, a write was attempted,
   or the snapshot is exhausted (which also frees it).  */
static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  /* We make the process list snapshot when the object starts to be
     read.  State persists across calls so the document can be read
     in several chunks.  */
  static const char *buf;
  static long len_avail = -1;
  static struct buffer buffer;

  DIR *dirp;

  if (strcmp (annex, "processes") != 0)
    return 0;

  /* This object is read-only.  */
  if (!readbuf || writebuf)
    return 0;

  if (offset == 0)
    {
      /* New read sequence: discard any previous snapshot and rebuild.  */
      if (len_avail != -1 && len_avail != 0)
	buffer_free (&buffer);
      len_avail = 0;
      buf = NULL;
      buffer_init (&buffer);
      buffer_grow_str (&buffer, "<osdata type=\"processes\">");

      dirp = opendir ("/proc");
      if (dirp)
	{
	  struct dirent *dp;
	  while ((dp = readdir (dirp)) != NULL)
	    {
	      struct stat statbuf;
	      char procentry[sizeof ("/proc/4294967295")];

	      /* Only all-numeric /proc entries are processes.  */
	      if (!isdigit (dp->d_name[0])
		  || strlen (dp->d_name) > sizeof ("4294967295") - 1)
		continue;

	      sprintf (procentry, "/proc/%s", dp->d_name);
	      if (stat (procentry, &statbuf) == 0
		  && S_ISDIR (statbuf.st_mode))
		{
		  char pathname[128];
		  FILE *f;
		  char cmd[MAXPATHLEN + 1];
		  struct passwd *entry;

		  sprintf (pathname, "/proc/%s/cmdline", dp->d_name);
		  entry = getpwuid (statbuf.st_uid);

		  if ((f = fopen (pathname, "r")) != NULL)
		    {
		      size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
		      if (len > 0)
			{
			  int i;
			  /* cmdline separates arguments with NULs;
			     turn them into spaces for display.  */
			  for (i = 0; i < len; i++)
			    if (cmd[i] == '\0')
			      cmd[i] = ' ';
			  cmd[len] = '\0';

			  buffer_xml_printf (
			    &buffer,
			    "<item>"
			    "<column name=\"pid\">%s</column>"
			    "<column name=\"user\">%s</column>"
			    "<column name=\"command\">%s</column>"
			    "</item>",
			    dp->d_name,
			    entry ? entry->pw_name : "?",
			    cmd);
			}
		      fclose (f);
		    }
		}
	    }

	  closedir (dirp);
	}
      buffer_grow_str0 (&buffer, "</osdata>\n");
      buf = buffer_finish (&buffer);
      len_avail = strlen (buf);
    }

  if (offset >= len_avail)
    {
      /* Done.  Get rid of the data.  */
      buffer_free (&buffer);
      buf = NULL;
      len_avail = 0;
      return 0;
    }

  if (len > len_avail - offset)
    len = len_avail - offset;
  memcpy (readbuf, buf + offset, len);

  return len;
}
2189
2190 static int
2191 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
2192 unsigned const char *writebuf, CORE_ADDR offset, int len)
2193 {
2194 struct siginfo siginfo;
2195 long pid = -1;
2196
2197 if (current_inferior == NULL)
2198 return -1;
2199
2200 pid = pid_of (get_thread_lwp (current_inferior));
2201
2202 if (debug_threads)
2203 fprintf (stderr, "%s siginfo for lwp %ld.\n",
2204 readbuf != NULL ? "Reading" : "Writing",
2205 pid);
2206
2207 if (offset > sizeof (siginfo))
2208 return -1;
2209
2210 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
2211 return -1;
2212
2213 if (offset + len > sizeof (siginfo))
2214 len = sizeof (siginfo) - offset;
2215
2216 if (readbuf != NULL)
2217 memcpy (readbuf, (char *) &siginfo + offset, len);
2218 else
2219 {
2220 memcpy ((char *) &siginfo + offset, writebuf, len);
2221 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
2222 return -1;
2223 }
2224
2225 return len;
2226 }
2227
/* The Linux target vector handed to the gdbserver core.  NOTE(review):
   this is a positional initializer — the order below must match the
   declaration of struct target_ops; confirm slot names against
   target.h before reordering.  */
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_watchpoint,
  linux_remove_watchpoint,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* Text/data offset reporting is only meaningful on uClinux no-MMU
     targets, where runtime segment addresses differ from link-time
     ones.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
  /* TLS address lookup requires libthread_db support.  */
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  /* Unimplemented slot.  */
  NULL,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
};
2263
2264 static void
2265 linux_init_signals ()
2266 {
2267 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
2268 to find what the cancel signal actually is. */
2269 signal (__SIGRTMIN+1, SIG_IGN);
2270 }
2271
/* Entry point called once at gdbserver startup: register the Linux
   target vector and perform one-time ptrace/signal setup.  */
void
initialize_low (void)
{
  thread_db_active = 0;
  set_target_ops (&linux_target_ops);
  /* Tell the core which trap instruction sequence to plant for
     breakpoints on this architecture.  */
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  /* Probe which PTRACE_O_TRACE* fork-following options this kernel
     supports.  */
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  /* Count the regsets (the table is terminated by a negative size)
     and allocate one enabled/disabled flag byte per regset.  */
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif
}
This page took 0.07527 seconds and 5 git commands to generate.