gdb: add target_ops::supports_displaced_step
diff --git a/gdbserver/linux-x86-low.cc b/gdbserver/linux-x86-low.cc
index 664d0d9f19912cf924eebaf6a44f5d540e82cfc3..7a65c1d079f86e96d1b0ee3f816e358a0bbdb6f4 100644
--- a/gdbserver/linux-x86-low.cc
+++ b/gdbserver/linux-x86-low.cc
@@ -110,6 +110,22 @@ public:
 
   bool supports_tracepoints () override;
 
+  bool supports_fast_tracepoints () override;
+
+  int install_fast_tracepoint_jump_pad
+    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
+     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
+     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
+     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
+     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
+     char *err) override;
+
+  int get_min_fast_tracepoint_insn_len () override;
+
+  struct emit_ops *emit_ops () override;
+
+  int get_ipa_tdesc_idx () override;
+
 protected:
 
   void low_arch_setup () override;
@@ -160,6 +176,12 @@ protected:
 
   int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
 
+  bool low_supports_range_stepping () override;
+
+  bool low_supports_catch_syscall () override;
+
+  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
+
 private:
 
   /* Update all the target description of all processes; a new GDB
@@ -211,11 +233,7 @@ static const int x86_64_regmap[] =
   -1,
   -1, -1, -1, -1, -1, -1, -1, -1,
   ORIG_RAX * 8,
-#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
   21 * 8,  22 * 8,
-#else
-  -1, -1,
-#endif
   -1, -1, -1, -1,                      /* MPX registers BND0 ... BND3.  */
   -1, -1,                              /* MPX registers BNDCFGU, BNDSTATUS.  */
   -1, -1, -1, -1, -1, -1, -1, -1,       /* xmm16 ... xmm31 (AVX512)  */
@@ -391,19 +409,6 @@ x86_fill_gregset (struct regcache *regcache, void *buf)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
 
-#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
-      {
-        unsigned long base;
-        int lwpid = lwpid_of (current_thread);
-
-        collect_register_by_name (regcache, "fs_base", &base);
-        ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
-
-        collect_register_by_name (regcache, "gs_base", &base);
-        ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
-      }
-#endif
-
       return;
     }
 
@@ -446,18 +451,6 @@ x86_store_gregset (struct regcache *regcache, const void *buf)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
 
-#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
-      {
-        unsigned long base;
-        int lwpid = lwpid_of (current_thread);
-
-        if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
-          supply_register_by_name (regcache, "fs_base", &base);
-
-        if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
-          supply_register_by_name (regcache, "gs_base", &base);
-      }
-#endif
       return;
     }
 #endif
@@ -1094,11 +1087,17 @@ x86_target::low_arch_setup ()
   current_process ()->tdesc = x86_linux_read_description ();
 }
 
+bool
+x86_target::low_supports_catch_syscall ()
+{
+  return true;
+}
+
 /* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
    code.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */
 
-static void
-x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
+void
+x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
 {
   int use_64bit = register_size (regcache->tdesc, 0) == 8;
 
@@ -1525,19 +1524,26 @@ i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
   return 0;
 }
 
-static int
-x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
-                                     CORE_ADDR collector,
-                                     CORE_ADDR lockaddr,
-                                     ULONGEST orig_size,
-                                     CORE_ADDR *jump_entry,
-                                     CORE_ADDR *trampoline,
-                                     ULONGEST *trampoline_size,
-                                     unsigned char *jjump_pad_insn,
-                                     ULONGEST *jjump_pad_insn_size,
-                                     CORE_ADDR *adjusted_insn_addr,
-                                     CORE_ADDR *adjusted_insn_addr_end,
-                                     char *err)
+bool
+x86_target::supports_fast_tracepoints ()
+{
+  return true;
+}
+
+int
+x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
+                                             CORE_ADDR tpaddr,
+                                             CORE_ADDR collector,
+                                             CORE_ADDR lockaddr,
+                                             ULONGEST orig_size,
+                                             CORE_ADDR *jump_entry,
+                                             CORE_ADDR *trampoline,
+                                             ULONGEST *trampoline_size,
+                                             unsigned char *jjump_pad_insn,
+                                             ULONGEST *jjump_pad_insn_size,
+                                             CORE_ADDR *adjusted_insn_addr,
+                                             CORE_ADDR *adjusted_insn_addr_end,
+                                             char *err)
 {
 #ifdef __x86_64__
   if (is_64bit_tdesc ())
@@ -1566,8 +1572,8 @@ x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
    architectures.  */
 
-static int
-x86_get_min_fast_tracepoint_insn_len (void)
+int
+x86_target::get_min_fast_tracepoint_insn_len ()
 {
   static int warned_about_fast_tracepoints = 0;
 
@@ -2915,8 +2921,8 @@ struct emit_ops i386_emit_ops =
   };
 
 
-static struct emit_ops *
-x86_emit_ops (void)
+emit_ops *
+x86_target::emit_ops ()
 {
 #ifdef __x86_64__
   if (is_64bit_tdesc ())
@@ -2935,23 +2941,14 @@ x86_target::sw_breakpoint_from_kind (int kind, int *size)
   return x86_breakpoint;
 }
 
-static int
-x86_supports_range_stepping (void)
-{
-  return 1;
-}
-
-/* Implementation of linux_target_ops method "supports_hardware_single_step".
- */
-
-static int
-x86_supports_hardware_single_step (void)
+bool
+x86_target::low_supports_range_stepping ()
 {
-  return 1;
+  return true;
 }
 
-static int
-x86_get_ipa_tdesc_idx (void)
+int
+x86_target::get_ipa_tdesc_idx ()
 {
   struct regcache *regcache = get_thread_regcache (current_thread, 0);
   const struct target_desc *tdesc = regcache->tdesc;
@@ -2966,20 +2963,6 @@ x86_get_ipa_tdesc_idx (void)
   return i386_get_ipa_tdesc_idx (tdesc);
 }
 
-/* This is initialized assuming an amd64 target.
-   x86_arch_setup will correct it for i386 or amd64 targets.  */
-
-struct linux_target_ops the_low_target =
-{
-  x86_install_fast_tracepoint_jump_pad,
-  x86_emit_ops,
-  x86_get_min_fast_tracepoint_insn_len,
-  x86_supports_range_stepping,
-  x86_supports_hardware_single_step,
-  x86_get_syscall_trapinfo,
-  x86_get_ipa_tdesc_idx,
-};
-
 /* The linux target ops object.  */
 
 linux_process_target *the_linux_target = &the_x86_target;
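The hunks above all follow one refactoring pattern: entries of the old linux_target_ops function-pointer table (removed in the final hunk) become virtual-method overrides on x86_target, with boolean supports_* queries guarding the optional hooks. The sketch below illustrates that pattern in isolation. It is not GDB code; the names process_target_base, x86ish_target and report_capabilities are invented for the example, and the min-instruction-length value is only a plausible placeholder.

// Minimal sketch of the pattern applied in this diff: a C-style ops table of
// function pointers is replaced by virtual methods that the architecture
// target class overrides.  All names here are hypothetical.

#include <iostream>

/* Before: struct linux_target_ops { int (*get_min_insn_len) (void); ... };
   After: the base class declares the hooks with conservative defaults.  */
class process_target_base
{
public:
  virtual ~process_target_base () = default;

  /* Capability query; defaults to "not supported".  */
  virtual bool supports_fast_tracepoints () { return false; }

  /* Architecture hook, only meaningful when the capability is supported.  */
  virtual int get_min_fast_tracepoint_insn_len () { return 0; }
};

/* The architecture-specific target overrides only the hooks it implements,
   mirroring how x86_target overrides supports_fast_tracepoints, emit_ops,
   get_ipa_tdesc_idx, low_supports_range_stepping, etc. in the diff.  */
class x86ish_target : public process_target_base
{
public:
  bool supports_fast_tracepoints () override { return true; }

  int get_min_fast_tracepoint_insn_len () override
  {
    /* Placeholder: the kind of per-architecture answer the method hides.  */
    return 5;
  }
};

/* Generic code now calls through the base pointer instead of checking
   function pointers in a global ops struct for NULL.  */
static void
report_capabilities (process_target_base *target)
{
  if (target->supports_fast_tracepoints ())
    std::cout << "fast tracepoints, min insn len "
              << target->get_min_fast_tracepoint_insn_len () << "\n";
  else
    std::cout << "no fast tracepoint support\n";
}

int
main ()
{
  x86ish_target the_target;
  report_capabilities (&the_target);
  return 0;
}

Compared with the NULL-checked function-pointer table, this arrangement lets the base class supply safe defaults and lets the compiler flag signature mismatches at build time instead of at link or run time.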