Add x86_debug_reg_state to gdbserver
gdb/gdbserver/linux-x86-low.c
/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

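/* Instruction templates for fast tracepoint jumps: a 32-bit relative
   jump (0xe9 <rel32>) and a 16-bit relative jump (0x66 0xe9 <rel16>).
   The offset bytes are filled in when the jump is installed.  */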
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So that the code below doesn't have to care whether it's i386 or
   amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
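  /* The -1 entries below, up to ORIG_RAX, cover the x87/SSE state
     (st0-st7, the FP control registers, xmm0-xmm15 and mxcsr), which
     is not fetched via PTRACE_PEEKUSER but supplied by the FP/XSTATE
     regsets instead.  */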
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,                  /* MPX registers BND0 ... BND3.  */
  -1, -1,                          /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,  /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,  /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,  /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,  /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64 bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    /* Shift that turns the %gs segment selector into a GDT index by
       dropping its low RPL/TI bits.  */
    const int reg_thread_area = 3;
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


\f
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the
   case where it does work.  Maybe the arch_setup routine could check
   whether it works and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
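/* 0xCC is the int3 software breakpoint instruction.  */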
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f

/* Return the offset of REGNUM in the u_debugreg field of struct
   user.  */

static int
u_debugreg_offset (int regnum)
{
  return (offsetof (struct user, u_debugreg)
          + sizeof (((struct user *) 0)->u_debugreg[0]) * regnum);
}


/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid, u_debugreg_offset (regnum), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid, u_debugreg_offset (regnum), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

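/* Callback for find_inferior.  Mark the debug registers of each
   thread of process PID_P as needing an update before the thread is
   next resumed, pausing the thread if it is currently running.  */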
static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thr);
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (thr) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
         we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

static void
x86_dr_low_set_addr (int regnum, CORE_ADDR addr)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_thread);

  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

static CORE_ADDR
x86_dr_low_get_addr (int regnum)
{
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (current_lwp_ptid (), regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

static void
x86_dr_low_set_control (unsigned long control)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_thread);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

static unsigned long
x86_dr_low_get_control (void)
{
  return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL);
}

/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

static unsigned long
x86_dr_low_get_status (void)
{
  return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS);
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_dr_low_set_control,
    x86_dr_low_set_addr,
    x86_dr_low_get_addr,
    x86_dr_low_get_status,
    x86_dr_low_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return insert_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return remove_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = XCNEW (struct arch_lwp_info);

  info->debug_registers_changed = 1;

  return info;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (get_lwp_thread (lwp));
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      struct x86_debug_reg_state *state
        = x86_debug_reg_state (ptid_get_pid (ptid));
      int i;

      x86_linux_dr_set (ptid, DR_CONTROL, 0);

      ALL_DEBUG_ADDRESS_REGISTERS (i)
        if (state->dr_ref_count[i] > 0)
          {
            x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

            /* If we're setting a watchpoint, any change the inferior
               had done itself to the debug registers needs to be
               discarded, otherwise, x86_dr_stopped_data_address can
               get confused.  */
            clear_status = 1;
          }

      if (state->dr_control_mirror != 0)
        x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64-bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
                                 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
                                 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));

      if (direction == 0)
        compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
                                         native);
      else
        siginfo_from_compat_x32_siginfo (native,
                                         (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Format of XSAVE extended state is:
     struct
     {
       fxsave_bytes[0..463]
       sw_usable_bytes[464..511]
       xstate_hdr_bytes[512..575]
       avx_bytes[576..831]
       future_state etc
     };

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes[464..471] contain the OS-enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return tdesc_i386_mmx_linux;
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_amd64_avx512_linux;

                case X86_XSTATE_MPX_MASK:
                  return tdesc_amd64_mpx_linux;

                case X86_XSTATE_AVX_MASK:
                  return tdesc_amd64_avx_linux;

                default:
                  return tdesc_amd64_linux;
                }
            }
          else
            return tdesc_amd64_linux;
        }
      else
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_x32_avx512_linux;

                case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
                case X86_XSTATE_AVX_MASK:
                  return tdesc_x32_avx_linux;

                default:
                  return tdesc_x32_linux;
                }
            }
          else
            return tdesc_x32_linux;
        }
#endif
    }
  else
    {
      if (xcr0_features)
        {
          switch (xcr0 & X86_XSTATE_ALL_MASK)
            {
            case (X86_XSTATE_AVX512_MASK):
              return tdesc_i386_avx512_linux;

            case (X86_XSTATE_MPX_MASK):
              return tdesc_i386_mpx_linux;

            case (X86_XSTATE_AVX_MASK):
              return tdesc_i386_avx_linux;

            default:
              return tdesc_i386_linux;
            }
        }
      else
        return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
                                            same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support XML target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && startswith (query, "xmlRegisters="))
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

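/* Write the LEN bytes in BUF to the inferior at *TO, advancing *TO
   past what was written.  */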
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

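/* Parse OP, a string of space-separated hex byte values, writing the
   bytes into BUF.  Returns the number of bytes written.  */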
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction for the jump pad into
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction for the jump pad into
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");       /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");    /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which is not available on
     the original i386.  If we cared about that, this could use xchg
     instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
 2077 /* GDB has no channel to explain to the user why a shorter fast
 2078 tracepoint is not possible, but at least make GDBserver
 2079 mention that something has gone awry. */
2080 if (!warned_about_fast_tracepoints)
2081 {
2082 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
2083 warned_about_fast_tracepoints = 1;
2084 }
2085 return 5;
2086 }
2087 }
2088 else
2089 {
2090 /* Indicate that the minimum length is currently unknown since the IPA
2091 has not loaded yet. */
2092 return 0;
2093 }
2094 }
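
/* A minimal usage sketch (hypothetical caller, not from this file):
   the tracepoint layer is expected to compare the length of the
   instruction at the tracepoint address against this minimum, and to
   treat a 0 return as "unknown until the IPA is loaded".  */
#if 0
static int
example_fast_tracepoint_usable (int insn_len_at_tpaddr)
{
  int min_len = x86_get_min_fast_tracepoint_insn_len ();

  if (min_len == 0)
    return 0;	/* Not known yet; retry once the IPA has loaded.  */
  return insn_len_at_tpaddr >= min_len;
}
#endif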
2095
2096 static void
2097 add_insns (unsigned char *start, int len)
2098 {
2099 CORE_ADDR buildaddr = current_insn_ptr;
2100
2101 if (debug_threads)
2102 debug_printf ("Adding %d bytes of insn at %s\n",
2103 len, paddress (buildaddr));
2104
2105 append_insns (&buildaddr, len, start);
2106 current_insn_ptr = buildaddr;
2107 }
2108
2109 /* Our general strategy for emitting code is to avoid specifying raw
2110 bytes whenever possible, and instead copy a block of inline asm
2111 that is embedded in the function. This is a little messy, because
2112 we need to keep the compiler from discarding what looks like dead
2113 code, plus suppress various warnings. */
2114
2115 #define EMIT_ASM(NAME, INSNS) \
2116 do \
2117 { \
2118 extern unsigned char start_ ## NAME, end_ ## NAME; \
2119 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2120 __asm__ ("jmp end_" #NAME "\n" \
2121 "\t" "start_" #NAME ":" \
2122 "\t" INSNS "\n" \
2123 "\t" "end_" #NAME ":"); \
2124 } while (0)
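
/* For illustration (editorial, not in the original): EMIT_ASM
   (amd64_pop, "pop %rax") expands to roughly

     extern unsigned char start_amd64_pop, end_amd64_pop;
     add_insns (&start_amd64_pop, &end_amd64_pop - &start_amd64_pop);
     __asm__ ("jmp end_amd64_pop\n"
	      "\t" "start_amd64_pop:" "\t" "pop %rax" "\n"
	      "\t" "end_amd64_pop:");

   The instruction bytes are assembled into the emitting function's own
   text between the two labels, the leading jmp keeps them from ever
   executing in gdbserver itself, and add_insns copies them out into
   the inferior's jump pad.  */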
2125
2126 #ifdef __x86_64__
2127
2128 #define EMIT_ASM32(NAME,INSNS) \
2129 do \
2130 { \
2131 extern unsigned char start_ ## NAME, end_ ## NAME; \
2132 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2133 __asm__ (".code32\n" \
2134 "\t" "jmp end_" #NAME "\n" \
2135 "\t" "start_" #NAME ":\n" \
2136 "\t" INSNS "\n" \
2137 "\t" "end_" #NAME ":\n" \
2138 ".code64\n"); \
2139 } while (0)
2140
2141 #else
2142
2143 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2144
2145 #endif
2146
2147 #ifdef __x86_64__
2148
2149 static void
2150 amd64_emit_prologue (void)
2151 {
2152 EMIT_ASM (amd64_prologue,
2153 "pushq %rbp\n\t"
2154 "movq %rsp,%rbp\n\t"
2155 "sub $0x20,%rsp\n\t"
2156 "movq %rdi,-8(%rbp)\n\t"
2157 "movq %rsi,-16(%rbp)");
2158 }
2159
2160
2161 static void
2162 amd64_emit_epilogue (void)
2163 {
2164 EMIT_ASM (amd64_epilogue,
2165 "movq -16(%rbp),%rdi\n\t"
2166 "movq %rax,(%rdi)\n\t"
2167 "xor %rax,%rax\n\t"
2168 "leave\n\t"
2169 "ret");
2170 }
2171
2172 static void
2173 amd64_emit_add (void)
2174 {
2175 EMIT_ASM (amd64_add,
2176 "add (%rsp),%rax\n\t"
2177 "lea 0x8(%rsp),%rsp");
2178 }
2179
2180 static void
2181 amd64_emit_sub (void)
2182 {
2183 EMIT_ASM (amd64_sub,
2184 "sub %rax,(%rsp)\n\t"
2185 "pop %rax");
2186 }
2187
2188 static void
2189 amd64_emit_mul (void)
2190 {
2191 emit_error = 1;
2192 }
2193
2194 static void
2195 amd64_emit_lsh (void)
2196 {
2197 emit_error = 1;
2198 }
2199
2200 static void
2201 amd64_emit_rsh_signed (void)
2202 {
2203 emit_error = 1;
2204 }
2205
2206 static void
2207 amd64_emit_rsh_unsigned (void)
2208 {
2209 emit_error = 1;
2210 }
2211
2212 static void
2213 amd64_emit_ext (int arg)
2214 {
2215 switch (arg)
2216 {
2217 case 8:
2218 EMIT_ASM (amd64_ext_8,
2219 "cbtw\n\t"
2220 "cwtl\n\t"
2221 "cltq");
2222 break;
2223 case 16:
2224 EMIT_ASM (amd64_ext_16,
2225 "cwtl\n\t"
2226 "cltq");
2227 break;
2228 case 32:
2229 EMIT_ASM (amd64_ext_32,
2230 "cltq");
2231 break;
2232 default:
2233 emit_error = 1;
2234 }
2235 }
2236
2237 static void
2238 amd64_emit_log_not (void)
2239 {
2240 EMIT_ASM (amd64_log_not,
2241 "test %rax,%rax\n\t"
2242 "sete %cl\n\t"
2243 "movzbq %cl,%rax");
2244 }
2245
2246 static void
2247 amd64_emit_bit_and (void)
2248 {
2249 EMIT_ASM (amd64_and,
2250 "and (%rsp),%rax\n\t"
2251 "lea 0x8(%rsp),%rsp");
2252 }
2253
2254 static void
2255 amd64_emit_bit_or (void)
2256 {
2257 EMIT_ASM (amd64_or,
2258 "or (%rsp),%rax\n\t"
2259 "lea 0x8(%rsp),%rsp");
2260 }
2261
2262 static void
2263 amd64_emit_bit_xor (void)
2264 {
2265 EMIT_ASM (amd64_xor,
2266 "xor (%rsp),%rax\n\t"
2267 "lea 0x8(%rsp),%rsp");
2268 }
2269
2270 static void
2271 amd64_emit_bit_not (void)
2272 {
2273 EMIT_ASM (amd64_bit_not,
2274 "xorq $0xffffffffffffffff,%rax");
2275 }
2276
2277 static void
2278 amd64_emit_equal (void)
2279 {
2280 EMIT_ASM (amd64_equal,
2281 "cmp %rax,(%rsp)\n\t"
2282 "je .Lamd64_equal_true\n\t"
2283 "xor %rax,%rax\n\t"
2284 "jmp .Lamd64_equal_end\n\t"
2285 ".Lamd64_equal_true:\n\t"
2286 "mov $0x1,%rax\n\t"
2287 ".Lamd64_equal_end:\n\t"
2288 "lea 0x8(%rsp),%rsp");
2289 }
2290
2291 static void
2292 amd64_emit_less_signed (void)
2293 {
2294 EMIT_ASM (amd64_less_signed,
2295 "cmp %rax,(%rsp)\n\t"
2296 "jl .Lamd64_less_signed_true\n\t"
2297 "xor %rax,%rax\n\t"
2298 "jmp .Lamd64_less_signed_end\n\t"
2299 ".Lamd64_less_signed_true:\n\t"
2300 "mov $1,%rax\n\t"
2301 ".Lamd64_less_signed_end:\n\t"
2302 "lea 0x8(%rsp),%rsp");
2303 }
2304
2305 static void
2306 amd64_emit_less_unsigned (void)
2307 {
2308 EMIT_ASM (amd64_less_unsigned,
2309 "cmp %rax,(%rsp)\n\t"
2310 "jb .Lamd64_less_unsigned_true\n\t"
2311 "xor %rax,%rax\n\t"
2312 "jmp .Lamd64_less_unsigned_end\n\t"
2313 ".Lamd64_less_unsigned_true:\n\t"
2314 "mov $1,%rax\n\t"
2315 ".Lamd64_less_unsigned_end:\n\t"
2316 "lea 0x8(%rsp),%rsp");
2317 }
2318
2319 static void
2320 amd64_emit_ref (int size)
2321 {
2322 switch (size)
2323 {
2324 case 1:
2325 EMIT_ASM (amd64_ref1,
2326 "movb (%rax),%al");
2327 break;
2328 case 2:
2329 EMIT_ASM (amd64_ref2,
2330 "movw (%rax),%ax");
2331 break;
2332 case 4:
2333 EMIT_ASM (amd64_ref4,
2334 "movl (%rax),%eax");
2335 break;
2336 case 8:
2337 EMIT_ASM (amd64_ref8,
2338 "movq (%rax),%rax");
2339 break;
2340 }
2341 }
2342
2343 static void
2344 amd64_emit_if_goto (int *offset_p, int *size_p)
2345 {
2346 EMIT_ASM (amd64_if_goto,
2347 "mov %rax,%rcx\n\t"
2348 "pop %rax\n\t"
2349 "cmp $0,%rcx\n\t"
2350 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2351 if (offset_p)
2352 *offset_p = 10;
2353 if (size_p)
2354 *size_p = 4;
2355 }
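
/* Editorial note on the 10 above: "mov %rax,%rcx" assembles to 3 bytes
   (48 89 c1), "pop %rax" to 1 (58) and "cmp $0,%rcx" to 4 (48 83 f9
   00), for 8 bytes in all; the jne opcode (0f 85) adds 2 more, so the
   zeroed rel32 to be patched later starts at offset 10.  */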
2356
2357 static void
2358 amd64_emit_goto (int *offset_p, int *size_p)
2359 {
2360 EMIT_ASM (amd64_goto,
2361 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2362 if (offset_p)
2363 *offset_p = 1;
2364 if (size_p)
2365 *size_p = 4;
2366 }
2367
2368 static void
2369 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2370 {
2371 int diff = (to - (from + size));
2372 unsigned char buf[sizeof (int)];
2373
2374 if (size != 4)
2375 {
2376 emit_error = 1;
2377 return;
2378 }
2379
2380 memcpy (buf, &diff, sizeof (int));
2381 write_inferior_memory (from, buf, sizeof (int));
2382 }
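
/* A minimal sketch (hypothetical caller, not from this file) of how
   the emit/patch pair above is meant to be used; TARGET stands for a
   forward address that only becomes known after more code has been
   emitted.  */
#if 0
static void
example_emit_forward_goto (CORE_ADDR target)
{
  int offset, size;
  CORE_ADDR insn_start = current_insn_ptr;

  amd64_emit_goto (&offset, &size);	/* Emits e9 00 00 00 00.  */
  /* ... emit intervening code, resolve TARGET ...  */
  /* Patch the rel32 at insn_start + offset; the stored displacement is
     target - (patch address + 4), i.e. relative to the next insn.  */
  amd64_write_goto_address (insn_start + offset, target, size);
}
#endif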
2383
2384 static void
2385 amd64_emit_const (LONGEST num)
2386 {
2387 unsigned char buf[16];
2388 int i;
2389 CORE_ADDR buildaddr = current_insn_ptr;
2390
2391 i = 0;
2392 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2393 memcpy (&buf[i], &num, sizeof (num));
2394 i += 8;
2395 append_insns (&buildaddr, i, buf);
2396 current_insn_ptr = buildaddr;
2397 }
2398
2399 static void
2400 amd64_emit_call (CORE_ADDR fn)
2401 {
2402 unsigned char buf[16];
2403 int i;
2404 CORE_ADDR buildaddr;
2405 LONGEST offset64;
2406
 2407 /* The destination function, living in a shared library, may be
 2408 more than 31 bits away from the compiled code pad. */
2409
2410 buildaddr = current_insn_ptr;
2411
2412 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2413
2414 i = 0;
2415
2416 if (offset64 > INT_MAX || offset64 < INT_MIN)
2417 {
 2418 /* Offset is too large for a direct call; load the address into a
 2419 register and do an indirect callq. Use %rdx: it is call-clobbered,
 2420 so we don't have to push/pop it around the call. */
 2421 buf[i++] = 0x48; /* movabs $fn,%rdx */
 2422 buf[i++] = 0xba;
 2423 memcpy (buf + i, &fn, 8);
 2424 i += 8;
 2425 buf[i++] = 0xff; /* callq *%rdx */
 2426 buf[i++] = 0xd2;
2427 }
2428 else
2429 {
 2430 int offset32 = offset64; /* we know we can't overflow here. */
 buf[i++] = 0xe8; /* call <reladdr> */
 2431 memcpy (buf + i, &offset32, 4);
 2432 i += 4;
2433 }
2434
2435 append_insns (&buildaddr, i, buf);
2436 current_insn_ptr = buildaddr;
2437 }
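
/* Editorial worked example: with the code pad near 0x601000 and fn at
   0x7ffff7a00000 (typical for a collector in a shared library),
   offset64 is roughly 0x7ffff739f000, far beyond INT_MAX, so the
   movabs/indirect-call path above is taken; an fn within +/-2GB of
   the pad gets the 5-byte e8 rel32 form instead.  */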
2438
2439 static void
2440 amd64_emit_reg (int reg)
2441 {
2442 unsigned char buf[16];
2443 int i;
2444 CORE_ADDR buildaddr;
2445
2446 /* Assume raw_regs is still in %rdi. */
2447 buildaddr = current_insn_ptr;
2448 i = 0;
2449 buf[i++] = 0xbe; /* mov $<n>,%esi */
2450 memcpy (&buf[i], &reg, sizeof (reg));
2451 i += 4;
2452 append_insns (&buildaddr, i, buf);
2453 current_insn_ptr = buildaddr;
2454 amd64_emit_call (get_raw_reg_func_addr ());
2455 }
2456
2457 static void
2458 amd64_emit_pop (void)
2459 {
2460 EMIT_ASM (amd64_pop,
2461 "pop %rax");
2462 }
2463
2464 static void
2465 amd64_emit_stack_flush (void)
2466 {
2467 EMIT_ASM (amd64_stack_flush,
2468 "push %rax");
2469 }
2470
2471 static void
2472 amd64_emit_zero_ext (int arg)
2473 {
2474 switch (arg)
2475 {
2476 case 8:
2477 EMIT_ASM (amd64_zero_ext_8,
2478 "and $0xff,%rax");
2479 break;
2480 case 16:
2481 EMIT_ASM (amd64_zero_ext_16,
2482 "and $0xffff,%rax");
2483 break;
2484 case 32:
2485 EMIT_ASM (amd64_zero_ext_32,
2486 "mov $0xffffffff,%rcx\n\t"
2487 "and %rcx,%rax");
2488 break;
2489 default:
2490 emit_error = 1;
2491 }
2492 }
2493
2494 static void
2495 amd64_emit_swap (void)
2496 {
2497 EMIT_ASM (amd64_swap,
2498 "mov %rax,%rcx\n\t"
2499 "pop %rax\n\t"
2500 "push %rcx");
2501 }
2502
2503 static void
2504 amd64_emit_stack_adjust (int n)
2505 {
2506 unsigned char buf[16];
2507 int i;
2508 CORE_ADDR buildaddr = current_insn_ptr;
2509
2510 i = 0;
2511 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2512 buf[i++] = 0x8d;
2513 buf[i++] = 0x64;
2514 buf[i++] = 0x24;
 2515 /* A disp8 only handles adjustments up to 15 slots, but we don't expect any more. */
2516 buf[i++] = n * 8;
2517 append_insns (&buildaddr, i, buf);
2518 current_insn_ptr = buildaddr;
2519 }
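
/* Editorial note: the lea above uses a sign-extended 8-bit
   displacement, so for n = 2 the emitted bytes are 48 8d 64 24 10,
   i.e. "lea 0x10(%rsp),%rsp"; a disp8 tops out at +127, hence the
   15-slot limit noted above.  */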
2520
2521 /* FN's prototype is `LONGEST(*fn)(int)'. */
2522
2523 static void
2524 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2525 {
2526 unsigned char buf[16];
2527 int i;
2528 CORE_ADDR buildaddr;
2529
2530 buildaddr = current_insn_ptr;
2531 i = 0;
2532 buf[i++] = 0xbf; /* movl $<n>,%edi */
2533 memcpy (&buf[i], &arg1, sizeof (arg1));
2534 i += 4;
2535 append_insns (&buildaddr, i, buf);
2536 current_insn_ptr = buildaddr;
2537 amd64_emit_call (fn);
2538 }
2539
2540 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2541
2542 static void
2543 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2544 {
2545 unsigned char buf[16];
2546 int i;
2547 CORE_ADDR buildaddr;
2548
2549 buildaddr = current_insn_ptr;
2550 i = 0;
2551 buf[i++] = 0xbf; /* movl $<n>,%edi */
2552 memcpy (&buf[i], &arg1, sizeof (arg1));
2553 i += 4;
2554 append_insns (&buildaddr, i, buf);
2555 current_insn_ptr = buildaddr;
2556 EMIT_ASM (amd64_void_call_2_a,
2557 /* Save away a copy of the stack top. */
2558 "push %rax\n\t"
2559 /* Also pass top as the second argument. */
2560 "mov %rax,%rsi");
2561 amd64_emit_call (fn);
2562 EMIT_ASM (amd64_void_call_2_b,
2563 /* Restore the stack top, %rax may have been trashed. */
2564 "pop %rax");
2565 }
2566
2567 void
2568 amd64_emit_eq_goto (int *offset_p, int *size_p)
2569 {
2570 EMIT_ASM (amd64_eq,
2571 "cmp %rax,(%rsp)\n\t"
2572 "jne .Lamd64_eq_fallthru\n\t"
2573 "lea 0x8(%rsp),%rsp\n\t"
2574 "pop %rax\n\t"
2575 /* jmp, but don't trust the assembler to choose the right jump */
2576 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2577 ".Lamd64_eq_fallthru:\n\t"
2578 "lea 0x8(%rsp),%rsp\n\t"
2579 "pop %rax");
2580
2581 if (offset_p)
2582 *offset_p = 13;
2583 if (size_p)
2584 *size_p = 4;
2585 }
2586
2587 void
2588 amd64_emit_ne_goto (int *offset_p, int *size_p)
2589 {
2590 EMIT_ASM (amd64_ne,
2591 "cmp %rax,(%rsp)\n\t"
2592 "je .Lamd64_ne_fallthru\n\t"
2593 "lea 0x8(%rsp),%rsp\n\t"
2594 "pop %rax\n\t"
2595 /* jmp, but don't trust the assembler to choose the right jump */
2596 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2597 ".Lamd64_ne_fallthru:\n\t"
2598 "lea 0x8(%rsp),%rsp\n\t"
2599 "pop %rax");
2600
2601 if (offset_p)
2602 *offset_p = 13;
2603 if (size_p)
2604 *size_p = 4;
2605 }
2606
2607 void
2608 amd64_emit_lt_goto (int *offset_p, int *size_p)
2609 {
2610 EMIT_ASM (amd64_lt,
2611 "cmp %rax,(%rsp)\n\t"
2612 "jnl .Lamd64_lt_fallthru\n\t"
2613 "lea 0x8(%rsp),%rsp\n\t"
2614 "pop %rax\n\t"
2615 /* jmp, but don't trust the assembler to choose the right jump */
2616 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2617 ".Lamd64_lt_fallthru:\n\t"
2618 "lea 0x8(%rsp),%rsp\n\t"
2619 "pop %rax");
2620
2621 if (offset_p)
2622 *offset_p = 13;
2623 if (size_p)
2624 *size_p = 4;
2625 }
2626
2627 void
2628 amd64_emit_le_goto (int *offset_p, int *size_p)
2629 {
2630 EMIT_ASM (amd64_le,
2631 "cmp %rax,(%rsp)\n\t"
2632 "jnle .Lamd64_le_fallthru\n\t"
2633 "lea 0x8(%rsp),%rsp\n\t"
2634 "pop %rax\n\t"
2635 /* jmp, but don't trust the assembler to choose the right jump */
2636 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2637 ".Lamd64_le_fallthru:\n\t"
2638 "lea 0x8(%rsp),%rsp\n\t"
2639 "pop %rax");
2640
2641 if (offset_p)
2642 *offset_p = 13;
2643 if (size_p)
2644 *size_p = 4;
2645 }
2646
2647 void
2648 amd64_emit_gt_goto (int *offset_p, int *size_p)
2649 {
2650 EMIT_ASM (amd64_gt,
2651 "cmp %rax,(%rsp)\n\t"
2652 "jng .Lamd64_gt_fallthru\n\t"
2653 "lea 0x8(%rsp),%rsp\n\t"
2654 "pop %rax\n\t"
2655 /* jmp, but don't trust the assembler to choose the right jump */
2656 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2657 ".Lamd64_gt_fallthru:\n\t"
2658 "lea 0x8(%rsp),%rsp\n\t"
2659 "pop %rax");
2660
2661 if (offset_p)
2662 *offset_p = 13;
2663 if (size_p)
2664 *size_p = 4;
2665 }
2666
2667 void
2668 amd64_emit_ge_goto (int *offset_p, int *size_p)
2669 {
2670 EMIT_ASM (amd64_ge,
2671 "cmp %rax,(%rsp)\n\t"
2672 "jnge .Lamd64_ge_fallthru\n\t"
2673 ".Lamd64_ge_jump:\n\t"
2674 "lea 0x8(%rsp),%rsp\n\t"
2675 "pop %rax\n\t"
2676 /* jmp, but don't trust the assembler to choose the right jump */
2677 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2678 ".Lamd64_ge_fallthru:\n\t"
2679 "lea 0x8(%rsp),%rsp\n\t"
2680 "pop %rax");
2681
2682 if (offset_p)
2683 *offset_p = 13;
2684 if (size_p)
2685 *size_p = 4;
2686 }
2687
2688 struct emit_ops amd64_emit_ops =
2689 {
2690 amd64_emit_prologue,
2691 amd64_emit_epilogue,
2692 amd64_emit_add,
2693 amd64_emit_sub,
2694 amd64_emit_mul,
2695 amd64_emit_lsh,
2696 amd64_emit_rsh_signed,
2697 amd64_emit_rsh_unsigned,
2698 amd64_emit_ext,
2699 amd64_emit_log_not,
2700 amd64_emit_bit_and,
2701 amd64_emit_bit_or,
2702 amd64_emit_bit_xor,
2703 amd64_emit_bit_not,
2704 amd64_emit_equal,
2705 amd64_emit_less_signed,
2706 amd64_emit_less_unsigned,
2707 amd64_emit_ref,
2708 amd64_emit_if_goto,
2709 amd64_emit_goto,
2710 amd64_write_goto_address,
2711 amd64_emit_const,
2712 amd64_emit_call,
2713 amd64_emit_reg,
2714 amd64_emit_pop,
2715 amd64_emit_stack_flush,
2716 amd64_emit_zero_ext,
2717 amd64_emit_swap,
2718 amd64_emit_stack_adjust,
2719 amd64_emit_int_call_1,
2720 amd64_emit_void_call_2,
2721 amd64_emit_eq_goto,
2722 amd64_emit_ne_goto,
2723 amd64_emit_lt_goto,
2724 amd64_emit_le_goto,
2725 amd64_emit_gt_goto,
2726 amd64_emit_ge_goto
2727 };
2728
2729 #endif /* __x86_64__ */
2730
2731 static void
2732 i386_emit_prologue (void)
2733 {
2734 EMIT_ASM32 (i386_prologue,
2735 "push %ebp\n\t"
2736 "mov %esp,%ebp\n\t"
2737 "push %ebx");
2738 /* At this point, the raw regs base address is at 8(%ebp), and the
2739 value pointer is at 12(%ebp). */
2740 }
2741
2742 static void
2743 i386_emit_epilogue (void)
2744 {
2745 EMIT_ASM32 (i386_epilogue,
2746 "mov 12(%ebp),%ecx\n\t"
2747 "mov %eax,(%ecx)\n\t"
2748 "mov %ebx,0x4(%ecx)\n\t"
2749 "xor %eax,%eax\n\t"
2750 "pop %ebx\n\t"
2751 "pop %ebp\n\t"
2752 "ret");
2753 }
2754
2755 static void
2756 i386_emit_add (void)
2757 {
2758 EMIT_ASM32 (i386_add,
2759 "add (%esp),%eax\n\t"
2760 "adc 0x4(%esp),%ebx\n\t"
2761 "lea 0x8(%esp),%esp");
2762 }
2763
2764 static void
2765 i386_emit_sub (void)
2766 {
2767 EMIT_ASM32 (i386_sub,
2768 "subl %eax,(%esp)\n\t"
2769 "sbbl %ebx,4(%esp)\n\t"
2770 "pop %eax\n\t"
2771 "pop %ebx\n\t");
2772 }
2773
2774 static void
2775 i386_emit_mul (void)
2776 {
2777 emit_error = 1;
2778 }
2779
2780 static void
2781 i386_emit_lsh (void)
2782 {
2783 emit_error = 1;
2784 }
2785
2786 static void
2787 i386_emit_rsh_signed (void)
2788 {
2789 emit_error = 1;
2790 }
2791
2792 static void
2793 i386_emit_rsh_unsigned (void)
2794 {
2795 emit_error = 1;
2796 }
2797
2798 static void
2799 i386_emit_ext (int arg)
2800 {
2801 switch (arg)
2802 {
2803 case 8:
2804 EMIT_ASM32 (i386_ext_8,
2805 "cbtw\n\t"
2806 "cwtl\n\t"
2807 "movl %eax,%ebx\n\t"
2808 "sarl $31,%ebx");
2809 break;
2810 case 16:
2811 EMIT_ASM32 (i386_ext_16,
2812 "cwtl\n\t"
2813 "movl %eax,%ebx\n\t"
2814 "sarl $31,%ebx");
2815 break;
2816 case 32:
2817 EMIT_ASM32 (i386_ext_32,
2818 "movl %eax,%ebx\n\t"
2819 "sarl $31,%ebx");
2820 break;
2821 default:
2822 emit_error = 1;
2823 }
2824 }
2825
2826 static void
2827 i386_emit_log_not (void)
2828 {
2829 EMIT_ASM32 (i386_log_not,
2830 "or %ebx,%eax\n\t"
2831 "test %eax,%eax\n\t"
2832 "sete %cl\n\t"
2833 "xor %ebx,%ebx\n\t"
2834 "movzbl %cl,%eax");
2835 }
2836
2837 static void
2838 i386_emit_bit_and (void)
2839 {
2840 EMIT_ASM32 (i386_and,
2841 "and (%esp),%eax\n\t"
2842 "and 0x4(%esp),%ebx\n\t"
2843 "lea 0x8(%esp),%esp");
2844 }
2845
2846 static void
2847 i386_emit_bit_or (void)
2848 {
2849 EMIT_ASM32 (i386_or,
2850 "or (%esp),%eax\n\t"
2851 "or 0x4(%esp),%ebx\n\t"
2852 "lea 0x8(%esp),%esp");
2853 }
2854
2855 static void
2856 i386_emit_bit_xor (void)
2857 {
2858 EMIT_ASM32 (i386_xor,
2859 "xor (%esp),%eax\n\t"
2860 "xor 0x4(%esp),%ebx\n\t"
2861 "lea 0x8(%esp),%esp");
2862 }
2863
2864 static void
2865 i386_emit_bit_not (void)
2866 {
2867 EMIT_ASM32 (i386_bit_not,
2868 "xor $0xffffffff,%eax\n\t"
2869 "xor $0xffffffff,%ebx\n\t");
2870 }
2871
2872 static void
2873 i386_emit_equal (void)
2874 {
2875 EMIT_ASM32 (i386_equal,
2876 "cmpl %ebx,4(%esp)\n\t"
2877 "jne .Li386_equal_false\n\t"
2878 "cmpl %eax,(%esp)\n\t"
2879 "je .Li386_equal_true\n\t"
2880 ".Li386_equal_false:\n\t"
2881 "xor %eax,%eax\n\t"
2882 "jmp .Li386_equal_end\n\t"
2883 ".Li386_equal_true:\n\t"
2884 "mov $1,%eax\n\t"
2885 ".Li386_equal_end:\n\t"
2886 "xor %ebx,%ebx\n\t"
2887 "lea 0x8(%esp),%esp");
2888 }
2889
2890 static void
2891 i386_emit_less_signed (void)
2892 {
2893 EMIT_ASM32 (i386_less_signed,
2894 "cmpl %ebx,4(%esp)\n\t"
2895 "jl .Li386_less_signed_true\n\t"
2896 "jne .Li386_less_signed_false\n\t"
2897 "cmpl %eax,(%esp)\n\t"
2898 "jl .Li386_less_signed_true\n\t"
2899 ".Li386_less_signed_false:\n\t"
2900 "xor %eax,%eax\n\t"
2901 "jmp .Li386_less_signed_end\n\t"
2902 ".Li386_less_signed_true:\n\t"
2903 "mov $1,%eax\n\t"
2904 ".Li386_less_signed_end:\n\t"
2905 "xor %ebx,%ebx\n\t"
2906 "lea 0x8(%esp),%esp");
2907 }
2908
2909 static void
2910 i386_emit_less_unsigned (void)
2911 {
2912 EMIT_ASM32 (i386_less_unsigned,
2913 "cmpl %ebx,4(%esp)\n\t"
2914 "jb .Li386_less_unsigned_true\n\t"
2915 "jne .Li386_less_unsigned_false\n\t"
2916 "cmpl %eax,(%esp)\n\t"
2917 "jb .Li386_less_unsigned_true\n\t"
2918 ".Li386_less_unsigned_false:\n\t"
2919 "xor %eax,%eax\n\t"
2920 "jmp .Li386_less_unsigned_end\n\t"
2921 ".Li386_less_unsigned_true:\n\t"
2922 "mov $1,%eax\n\t"
2923 ".Li386_less_unsigned_end:\n\t"
2924 "xor %ebx,%ebx\n\t"
2925 "lea 0x8(%esp),%esp");
2926 }
2927
2928 static void
2929 i386_emit_ref (int size)
2930 {
2931 switch (size)
2932 {
2933 case 1:
2934 EMIT_ASM32 (i386_ref1,
2935 "movb (%eax),%al");
2936 break;
2937 case 2:
2938 EMIT_ASM32 (i386_ref2,
2939 "movw (%eax),%ax");
2940 break;
2941 case 4:
2942 EMIT_ASM32 (i386_ref4,
2943 "movl (%eax),%eax");
2944 break;
2945 case 8:
2946 EMIT_ASM32 (i386_ref8,
2947 "movl 4(%eax),%ebx\n\t"
2948 "movl (%eax),%eax");
2949 break;
2950 }
2951 }
2952
2953 static void
2954 i386_emit_if_goto (int *offset_p, int *size_p)
2955 {
2956 EMIT_ASM32 (i386_if_goto,
2957 "mov %eax,%ecx\n\t"
2958 "or %ebx,%ecx\n\t"
2959 "pop %eax\n\t"
2960 "pop %ebx\n\t"
2961 "cmpl $0,%ecx\n\t"
2962 /* Don't trust the assembler to choose the right jump */
2963 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2964
2965 if (offset_p)
2966 *offset_p = 11; /* be sure that this matches the sequence above */
2967 if (size_p)
2968 *size_p = 4;
2969 }
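
/* Editorial note on the 11 above: mov %eax,%ecx (2 bytes), or
   %ebx,%ecx (2), pop %eax (1), pop %ebx (1) and cmpl $0,%ecx (3) come
   to 9 bytes; the jne opcode (0f 85) adds 2 more, so the rel32 starts
   at offset 11, as the comment requires.  */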
2970
2971 static void
2972 i386_emit_goto (int *offset_p, int *size_p)
2973 {
2974 EMIT_ASM32 (i386_goto,
2975 /* Don't trust the assembler to choose the right jump */
2976 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2977 if (offset_p)
2978 *offset_p = 1;
2979 if (size_p)
2980 *size_p = 4;
2981 }
2982
2983 static void
2984 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2985 {
2986 int diff = (to - (from + size));
2987 unsigned char buf[sizeof (int)];
2988
2989 /* We're only doing 4-byte sizes at the moment. */
2990 if (size != 4)
2991 {
2992 emit_error = 1;
2993 return;
2994 }
2995
2996 memcpy (buf, &diff, sizeof (int));
2997 write_inferior_memory (from, buf, sizeof (int));
2998 }
2999
3000 static void
3001 i386_emit_const (LONGEST num)
3002 {
3003 unsigned char buf[16];
3004 int i, hi, lo;
3005 CORE_ADDR buildaddr = current_insn_ptr;
3006
3007 i = 0;
3008 buf[i++] = 0xb8; /* mov $<n>,%eax */
3009 lo = num & 0xffffffff;
3010 memcpy (&buf[i], &lo, sizeof (lo));
3011 i += 4;
3012 hi = ((num >> 32) & 0xffffffff);
3013 if (hi)
3014 {
3015 buf[i++] = 0xbb; /* mov $<n>,%ebx */
3016 memcpy (&buf[i], &hi, sizeof (hi));
3017 i += 4;
3018 }
3019 else
3020 {
3021 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
3022 }
3023 append_insns (&buildaddr, i, buf);
3024 current_insn_ptr = buildaddr;
3025 }
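
/* Editorial illustration of the split above: the 32-bit emitters keep
   a 64-bit value in the %eax (low) / %ebx (high) pair, so for
   num = 0x1122334455667788LL the bytes emitted load 0x55667788 into
   %eax and 0x11223344 into %ebx, while a constant that fits in 32
   bits gets the cheaper two-byte "xor %ebx,%ebx" for the high half.  */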
3026
3027 static void
3028 i386_emit_call (CORE_ADDR fn)
3029 {
3030 unsigned char buf[16];
3031 int i, offset;
3032 CORE_ADDR buildaddr;
3033
3034 buildaddr = current_insn_ptr;
3035 i = 0;
3036 buf[i++] = 0xe8; /* call <reladdr> */
3037 offset = ((int) fn) - (buildaddr + 5);
3038 memcpy (buf + 1, &offset, 4);
3039 append_insns (&buildaddr, 5, buf);
3040 current_insn_ptr = buildaddr;
3041 }
3042
3043 static void
3044 i386_emit_reg (int reg)
3045 {
3046 unsigned char buf[16];
3047 int i;
3048 CORE_ADDR buildaddr;
3049
3050 EMIT_ASM32 (i386_reg_a,
3051 "sub $0x8,%esp");
3052 buildaddr = current_insn_ptr;
3053 i = 0;
3054 buf[i++] = 0xb8; /* mov $<n>,%eax */
3055 memcpy (&buf[i], &reg, sizeof (reg));
3056 i += 4;
3057 append_insns (&buildaddr, i, buf);
3058 current_insn_ptr = buildaddr;
3059 EMIT_ASM32 (i386_reg_b,
3060 "mov %eax,4(%esp)\n\t"
3061 "mov 8(%ebp),%eax\n\t"
3062 "mov %eax,(%esp)");
3063 i386_emit_call (get_raw_reg_func_addr ());
3064 EMIT_ASM32 (i386_reg_c,
3065 "xor %ebx,%ebx\n\t"
3066 "lea 0x8(%esp),%esp");
3067 }
3068
3069 static void
3070 i386_emit_pop (void)
3071 {
3072 EMIT_ASM32 (i386_pop,
3073 "pop %eax\n\t"
3074 "pop %ebx");
3075 }
3076
3077 static void
3078 i386_emit_stack_flush (void)
3079 {
3080 EMIT_ASM32 (i386_stack_flush,
3081 "push %ebx\n\t"
3082 "push %eax");
3083 }
3084
3085 static void
3086 i386_emit_zero_ext (int arg)
3087 {
3088 switch (arg)
3089 {
3090 case 8:
3091 EMIT_ASM32 (i386_zero_ext_8,
3092 "and $0xff,%eax\n\t"
3093 "xor %ebx,%ebx");
3094 break;
3095 case 16:
3096 EMIT_ASM32 (i386_zero_ext_16,
3097 "and $0xffff,%eax\n\t"
3098 "xor %ebx,%ebx");
3099 break;
3100 case 32:
3101 EMIT_ASM32 (i386_zero_ext_32,
3102 "xor %ebx,%ebx");
3103 break;
3104 default:
3105 emit_error = 1;
3106 }
3107 }
3108
3109 static void
3110 i386_emit_swap (void)
3111 {
3112 EMIT_ASM32 (i386_swap,
3113 "mov %eax,%ecx\n\t"
3114 "mov %ebx,%edx\n\t"
3115 "pop %eax\n\t"
3116 "pop %ebx\n\t"
3117 "push %edx\n\t"
3118 "push %ecx");
3119 }
3120
3121 static void
3122 i386_emit_stack_adjust (int n)
3123 {
3124 unsigned char buf[16];
3125 int i;
3126 CORE_ADDR buildaddr = current_insn_ptr;
3127
3128 i = 0;
3129 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3130 buf[i++] = 0x64;
3131 buf[i++] = 0x24;
3132 buf[i++] = n * 8;
3133 append_insns (&buildaddr, i, buf);
3134 current_insn_ptr = buildaddr;
3135 }
3136
3137 /* FN's prototype is `LONGEST(*fn)(int)'. */
3138
3139 static void
3140 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3141 {
3142 unsigned char buf[16];
3143 int i;
3144 CORE_ADDR buildaddr;
3145
3146 EMIT_ASM32 (i386_int_call_1_a,
3147 /* Reserve a bit of stack space. */
3148 "sub $0x8,%esp");
3149 /* Put the one argument on the stack. */
3150 buildaddr = current_insn_ptr;
3151 i = 0;
3152 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3153 buf[i++] = 0x04;
3154 buf[i++] = 0x24;
3155 memcpy (&buf[i], &arg1, sizeof (arg1));
3156 i += 4;
3157 append_insns (&buildaddr, i, buf);
3158 current_insn_ptr = buildaddr;
3159 i386_emit_call (fn);
3160 EMIT_ASM32 (i386_int_call_1_c,
3161 "mov %edx,%ebx\n\t"
3162 "lea 0x8(%esp),%esp");
3163 }
3164
3165 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3166
3167 static void
3168 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3169 {
3170 unsigned char buf[16];
3171 int i;
3172 CORE_ADDR buildaddr;
3173
3174 EMIT_ASM32 (i386_void_call_2_a,
3175 /* Preserve %eax only; we don't have to worry about %ebx. */
3176 "push %eax\n\t"
3177 /* Reserve a bit of stack space for arguments. */
3178 "sub $0x10,%esp\n\t"
 3179 /* Copy "top" to the second argument position. (Note that
 3180 we can't assume the function won't scribble on its
 3181 arguments, so don't try to restore from this.) */
3182 "mov %eax,4(%esp)\n\t"
3183 "mov %ebx,8(%esp)");
3184 /* Put the first argument on the stack. */
3185 buildaddr = current_insn_ptr;
3186 i = 0;
3187 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3188 buf[i++] = 0x04;
3189 buf[i++] = 0x24;
3190 memcpy (&buf[i], &arg1, sizeof (arg1));
3191 i += 4;
3192 append_insns (&buildaddr, i, buf);
3193 current_insn_ptr = buildaddr;
3194 i386_emit_call (fn);
3195 EMIT_ASM32 (i386_void_call_2_b,
3196 "lea 0x10(%esp),%esp\n\t"
3197 /* Restore original stack top. */
3198 "pop %eax");
3199 }
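
/* Editorial sketch of the stack at the call emitted above, relying on
   the cdecl convention the 32-bit code uses throughout:

      (%esp)     arg1 (the int stored by the movl)
      4(%esp)    low 32 bits of the stack-top value (from %eax)
      8(%esp)    high 32 bits of the stack-top value (from %ebx)
      12(%esp)   4 unused bytes of the 0x10-byte reservation
      16(%esp)   the %eax saved by the initial push

   so FN sees its (int, LONGEST) arguments without any register
   setup.  */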
3200
3201
3202 void
3203 i386_emit_eq_goto (int *offset_p, int *size_p)
3204 {
3205 EMIT_ASM32 (eq,
 3206 /* Check the low half first; it is more likely to be the decider. */
3207 "cmpl %eax,(%esp)\n\t"
3208 "jne .Leq_fallthru\n\t"
3209 "cmpl %ebx,4(%esp)\n\t"
3210 "jne .Leq_fallthru\n\t"
3211 "lea 0x8(%esp),%esp\n\t"
3212 "pop %eax\n\t"
3213 "pop %ebx\n\t"
3214 /* jmp, but don't trust the assembler to choose the right jump */
3215 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3216 ".Leq_fallthru:\n\t"
3217 "lea 0x8(%esp),%esp\n\t"
3218 "pop %eax\n\t"
3219 "pop %ebx");
3220
3221 if (offset_p)
3222 *offset_p = 18;
3223 if (size_p)
3224 *size_p = 4;
3225 }
3226
3227 void
3228 i386_emit_ne_goto (int *offset_p, int *size_p)
3229 {
3230 EMIT_ASM32 (ne,
 3231 /* Check the low half first; it is more likely to be the decider. */
3232 "cmpl %eax,(%esp)\n\t"
3233 "jne .Lne_jump\n\t"
3234 "cmpl %ebx,4(%esp)\n\t"
3235 "je .Lne_fallthru\n\t"
3236 ".Lne_jump:\n\t"
3237 "lea 0x8(%esp),%esp\n\t"
3238 "pop %eax\n\t"
3239 "pop %ebx\n\t"
3240 /* jmp, but don't trust the assembler to choose the right jump */
3241 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3242 ".Lne_fallthru:\n\t"
3243 "lea 0x8(%esp),%esp\n\t"
3244 "pop %eax\n\t"
3245 "pop %ebx");
3246
3247 if (offset_p)
3248 *offset_p = 18;
3249 if (size_p)
3250 *size_p = 4;
3251 }
3252
3253 void
3254 i386_emit_lt_goto (int *offset_p, int *size_p)
3255 {
3256 EMIT_ASM32 (lt,
3257 "cmpl %ebx,4(%esp)\n\t"
3258 "jl .Llt_jump\n\t"
3259 "jne .Llt_fallthru\n\t"
3260 "cmpl %eax,(%esp)\n\t"
3261 "jnl .Llt_fallthru\n\t"
3262 ".Llt_jump:\n\t"
3263 "lea 0x8(%esp),%esp\n\t"
3264 "pop %eax\n\t"
3265 "pop %ebx\n\t"
3266 /* jmp, but don't trust the assembler to choose the right jump */
3267 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3268 ".Llt_fallthru:\n\t"
3269 "lea 0x8(%esp),%esp\n\t"
3270 "pop %eax\n\t"
3271 "pop %ebx");
3272
3273 if (offset_p)
3274 *offset_p = 20;
3275 if (size_p)
3276 *size_p = 4;
3277 }
3278
3279 void
3280 i386_emit_le_goto (int *offset_p, int *size_p)
3281 {
3282 EMIT_ASM32 (le,
3283 "cmpl %ebx,4(%esp)\n\t"
3284 "jle .Lle_jump\n\t"
3285 "jne .Lle_fallthru\n\t"
3286 "cmpl %eax,(%esp)\n\t"
3287 "jnle .Lle_fallthru\n\t"
3288 ".Lle_jump:\n\t"
3289 "lea 0x8(%esp),%esp\n\t"
3290 "pop %eax\n\t"
3291 "pop %ebx\n\t"
3292 /* jmp, but don't trust the assembler to choose the right jump */
3293 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3294 ".Lle_fallthru:\n\t"
3295 "lea 0x8(%esp),%esp\n\t"
3296 "pop %eax\n\t"
3297 "pop %ebx");
3298
3299 if (offset_p)
3300 *offset_p = 20;
3301 if (size_p)
3302 *size_p = 4;
3303 }
3304
3305 void
3306 i386_emit_gt_goto (int *offset_p, int *size_p)
3307 {
3308 EMIT_ASM32 (gt,
3309 "cmpl %ebx,4(%esp)\n\t"
3310 "jg .Lgt_jump\n\t"
3311 "jne .Lgt_fallthru\n\t"
3312 "cmpl %eax,(%esp)\n\t"
3313 "jng .Lgt_fallthru\n\t"
3314 ".Lgt_jump:\n\t"
3315 "lea 0x8(%esp),%esp\n\t"
3316 "pop %eax\n\t"
3317 "pop %ebx\n\t"
3318 /* jmp, but don't trust the assembler to choose the right jump */
3319 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3320 ".Lgt_fallthru:\n\t"
3321 "lea 0x8(%esp),%esp\n\t"
3322 "pop %eax\n\t"
3323 "pop %ebx");
3324
3325 if (offset_p)
3326 *offset_p = 20;
3327 if (size_p)
3328 *size_p = 4;
3329 }
3330
3331 void
3332 i386_emit_ge_goto (int *offset_p, int *size_p)
3333 {
3334 EMIT_ASM32 (ge,
3335 "cmpl %ebx,4(%esp)\n\t"
3336 "jge .Lge_jump\n\t"
3337 "jne .Lge_fallthru\n\t"
3338 "cmpl %eax,(%esp)\n\t"
3339 "jnge .Lge_fallthru\n\t"
3340 ".Lge_jump:\n\t"
3341 "lea 0x8(%esp),%esp\n\t"
3342 "pop %eax\n\t"
3343 "pop %ebx\n\t"
3344 /* jmp, but don't trust the assembler to choose the right jump */
3345 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3346 ".Lge_fallthru:\n\t"
3347 "lea 0x8(%esp),%esp\n\t"
3348 "pop %eax\n\t"
3349 "pop %ebx");
3350
3351 if (offset_p)
3352 *offset_p = 20;
3353 if (size_p)
3354 *size_p = 4;
3355 }
3356
3357 struct emit_ops i386_emit_ops =
3358 {
3359 i386_emit_prologue,
3360 i386_emit_epilogue,
3361 i386_emit_add,
3362 i386_emit_sub,
3363 i386_emit_mul,
3364 i386_emit_lsh,
3365 i386_emit_rsh_signed,
3366 i386_emit_rsh_unsigned,
3367 i386_emit_ext,
3368 i386_emit_log_not,
3369 i386_emit_bit_and,
3370 i386_emit_bit_or,
3371 i386_emit_bit_xor,
3372 i386_emit_bit_not,
3373 i386_emit_equal,
3374 i386_emit_less_signed,
3375 i386_emit_less_unsigned,
3376 i386_emit_ref,
3377 i386_emit_if_goto,
3378 i386_emit_goto,
3379 i386_write_goto_address,
3380 i386_emit_const,
3381 i386_emit_call,
3382 i386_emit_reg,
3383 i386_emit_pop,
3384 i386_emit_stack_flush,
3385 i386_emit_zero_ext,
3386 i386_emit_swap,
3387 i386_emit_stack_adjust,
3388 i386_emit_int_call_1,
3389 i386_emit_void_call_2,
3390 i386_emit_eq_goto,
3391 i386_emit_ne_goto,
3392 i386_emit_lt_goto,
3393 i386_emit_le_goto,
3394 i386_emit_gt_goto,
3395 i386_emit_ge_goto
3396 };
3397
3398
3399 static struct emit_ops *
3400 x86_emit_ops (void)
3401 {
3402 #ifdef __x86_64__
3403 if (is_64bit_tdesc ())
3404 return &amd64_emit_ops;
3405 else
3406 #endif
3407 return &i386_emit_ops;
3408 }
3409
3410 static int
3411 x86_supports_range_stepping (void)
3412 {
3413 return 1;
3414 }
3415
3416 /* This is initialized assuming an amd64 target.
3417 x86_arch_setup will correct it for i386 or amd64 targets. */
3418
3419 struct linux_target_ops the_low_target =
3420 {
3421 x86_arch_setup,
3422 x86_linux_regs_info,
3423 x86_cannot_fetch_register,
3424 x86_cannot_store_register,
3425 NULL, /* fetch_register */
3426 x86_get_pc,
3427 x86_set_pc,
3428 x86_breakpoint,
3429 x86_breakpoint_len,
3430 NULL,
3431 1,
3432 x86_breakpoint_at,
3433 x86_supports_z_point_type,
3434 x86_insert_point,
3435 x86_remove_point,
3436 x86_stopped_by_watchpoint,
3437 x86_stopped_data_address,
3438 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3439 native i386 case (no registers smaller than an xfer unit), and are not
3440 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3441 NULL,
3442 NULL,
3443 /* need to fix up i386 siginfo if host is amd64 */
3444 x86_siginfo_fixup,
3445 x86_linux_new_process,
3446 x86_linux_new_thread,
3447 x86_linux_prepare_to_resume,
3448 x86_linux_process_qsupported,
3449 x86_supports_tracepoints,
3450 x86_get_thread_area,
3451 x86_install_fast_tracepoint_jump_pad,
3452 x86_emit_ops,
3453 x86_get_min_fast_tracepoint_insn_len,
3454 x86_supports_range_stepping,
3455 };
3456
3457 void
3458 initialize_low_arch (void)
3459 {
3460 /* Initialize the Linux target descriptions. */
3461 #ifdef __x86_64__
3462 init_registers_amd64_linux ();
3463 init_registers_amd64_avx_linux ();
3464 init_registers_amd64_avx512_linux ();
3465 init_registers_amd64_mpx_linux ();
3466
3467 init_registers_x32_linux ();
3468 init_registers_x32_avx_linux ();
3469 init_registers_x32_avx512_linux ();
3470
3471 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3472 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3473 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3474 #endif
3475 init_registers_i386_linux ();
3476 init_registers_i386_mmx_linux ();
3477 init_registers_i386_avx_linux ();
3478 init_registers_i386_avx512_linux ();
3479 init_registers_i386_mpx_linux ();
3480
3481 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3482 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3483 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3484
3485 initialize_regsets_info (&x86_regsets_info);
3486 }