Add x86 AVX support to gdbserver.
gdb/gdbserver/linux-x86-low.c
/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */

#include <stddef.h>
#include <signal.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"

/* Defined in auto-generated file i386-linux.c. */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c. */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c. */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c. */
void init_registers_amd64_avx_linux (void);

/* Backward compatibility for gdb without XML support. */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
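
/* These canned descriptions are used only when gdb itself does not
   understand XML target descriptions (see x86_linux_update_xmltarget
   below); in that case no AVX registers are described and x86_xcr0 is
   limited to the x87/SSE mask. */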

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif
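
/* PTRACE_GETREGSET/PTRACE_SETREGSET, together with the NT_X86_XSTATE
   note type, are the ptrace interface used below to transfer the XSAVE
   extended state (and therefore the AVX registers).  The values are
   defined here in case the system headers are too old to provide
   them. */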


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it. */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations. */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
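
/* ARCH_GET_FS/ARCH_GET_GS are the arch_prctl codes used with
   PTRACE_ARCH_PRCTL in ps_get_thread_area below to read the FS/GS
   base of a 64-bit thread. */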

/* Per-process arch-specific data we want to keep. */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep. */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread. */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs. */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64. */
#define ORIG_EAX ORIG_RAX

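/* Mapping between the 64-bit general-purpose registers in `struct
   user' format and GDB's amd64 register array layout.  Entries set to
   -1 mark registers (the x87/SSE state and friends) that are not
   transferred through this general-purpose regset; see
   x86_fill_gregset, which skips them. */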
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout. */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
\f
/* Called by libthread_db. */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

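    /* The descriptor returned by PTRACE_GET_THREAD_AREA has the layout
       of struct user_desc; its second word is the segment's base
       address, which is what libthread_db wants here. */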
    *(int *) base = desc[1];
    return PS_OK;
  }
}
\f
static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the case
   where it does work.  Maybe the arch_setup routine could check whether
   it works and update target_regsets accordingly, perhaps by moving
   target_regsets to linux_target_ops and setting the right one there,
   rather than having to modify the target_regsets global. */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
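
/* The NT_X86_XSTATE entry above is registered with size 0; once
   PTRACE_GETREGSET support is detected, x86_linux_update_xmltarget
   sets its size to I386_XSTATE_SIZE (xcr0) so the whole XSAVE buffer
   (including the AVX state) is transferred, and zeroes out the other
   non-general regsets. */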

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
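
/* 0xCC is the single-byte INT3 instruction, the standard x86 software
   breakpoint. */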

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Support for debug registers. */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

/* Update the inferior's debug register REGNUM from STATE. */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  struct inferior_list_entry *lp;
  CORE_ADDR addr;
  /* Only need to update the threads of this process. */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  addr = state->dr_mirror[regnum];

  for (lp = all_lwps.head; lp; lp = lp->next)
    {
      struct lwp_info *lwp = (struct lwp_info *) lp;

      /* The actual update is done later, we just mark that the register
         needs updating. */
      if (pid_of (lwp) == pid)
        lwp->arch_private->debug_registers_changed = 1;
    }
}

/* Update the inferior's DR7 debug control register from STATE. */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  struct inferior_list_entry *lp;
  /* Only need to update the threads of this process. */
  int pid = pid_of (get_thread_lwp (current_inferior));

  for (lp = all_lwps.head; lp; lp = lp->next)
    {
      struct lwp_info *lwp = (struct lwp_info *) lp;

      /* The actual update is done later, we just mark that the register
         needs updating. */
      if (pid_of (lwp) == pid)
        lwp->arch_private->debug_registers_changed = 1;
    }
}

/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE. */

void
i386_dr_low_get_status (struct i386_debug_reg_state *state)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  state->dr_status_mirror = x86_linux_dr_get (ptid, DR_STATUS);
}
\f
/* Watchpoint support. */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      return set_gdb_breakpoint_at (addr);
    case '2':
    case '3':
    case '4':
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported. */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      return delete_gdb_breakpoint_at (addr);
    case '2':
    case '3':
    case '4':
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported. */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                     &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created. */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected. */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies. */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state = &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

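  /* If the thread stopped for a watchpoint, clear the DR6 status
     register before it resumes, so that a stale hit is not reported
     again at the next stop. */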
  if (lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves. */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support. */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place are si_timer1 and si_timer2. */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code < 0)
    {
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code < 0)
    {
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF. */

static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object. */
  if (register_size (0) == 4)
    {
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Update gdbserver_xmltarget. */

static void
x86_linux_update_xmltarget (void)
{
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;

  if (!current_inferior)
    return;

#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  init_registers_i386_linux ();
#endif

  if (!use_xml)
    {
      /* Don't use XML. */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
        gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
        gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported. */
  if (have_ptrace_getregset == -1)
    {
      int pid = pid_of (get_thread_lwp (current_inferior));
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;
      struct regset_info *regset;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works. */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
                  &iov) < 0)
        {
          have_ptrace_getregset = 0;
          return;
        }
      else
        have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464. */
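      /* (The Linux kernel exports XCR0 in the sw_reserved bytes of the
         FXSAVE area, which start at offset 464 of the XSAVE buffer.) */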
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available. */
      for (regset = target_regsets;
           regset->fill_function != NULL; regset++)
        if (regset->get_request == PTRACE_GETREGSET)
          regset->size = I386_XSTATE_SIZE (xcr0);
        else if (regset->type != GENERAL_REGS)
          regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support. */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
        {
          x86_xcr0 = xcr0;

#ifdef __x86_64__
          /* I386 has 8 xmm regs. */
          if (num_xmm_registers == 8)
            init_registers_i386_avx_linux ();
          else
            init_registers_amd64_avx_linux ();
#else
          init_registers_i386_avx_linux ();
#endif
        }
    }
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET. */
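/* For example, a gdb that understands x86 XML target descriptions is
   expected to send something like "xmlRegisters=i386" as part of its
   qSupported request (the exact feature list varies; "i386" is the
   token this code looks for). */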

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions. */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Initialize gdbserver for the architecture of the inferior. */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  char *file = linux_child_pid_to_exec_file (pid);
  int use_64bit = elf_64_file_p (file);

  free (file);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
         but "that can't happen" if we've gotten this far.
         Fall through and assume this is a 32-bit program. */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS. */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs. */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior. */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs. */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets. */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  -1,
  NULL,
  NULL,
  NULL,
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported
};