Fix TLS to LE optimization for x32
author: H.J. Lu <hjl.tools@gmail.com>
Tue, 20 Nov 2012 05:56:06 +0000 (05:56 +0000)
committer: H.J. Lu <hjl.tools@gmail.com>
Tue, 20 Nov 2012 05:56:06 +0000 (05:56 +0000)
PR gold/14858
* x86_64.cc (Relocate::tls_ld_to_le): Support x32.

gold/ChangeLog
gold/x86_64.cc

index d5cdd32f6353f3bbb87823cec0e046019e2a616c..9a2868e05548446ed87e2c3142878f604e2df59d 100644 (file)
@@ -1,3 +1,8 @@
+2012-11-19  H.J. Lu  <hongjiu.lu@intel.com>
+
+       PR gold/14858
+       * x86_64.cc (Relocate::tls_ld_to_le): Support x32.
+
 2012-11-14  Roland McGrath  <mcgrathr@google.com>
 
        * arm.cc (Output_data_plt_arm_nacl::first_plt_entry): Use bic rather
index 63421961b678b9efca14e76b4a78016c15e5f058..8119983df2ec180fbfc3ff720f923a9dcb43350e 100644 (file)
@@ -3965,8 +3965,12 @@ Target_x86_64<size>::Relocate::tls_ld_to_le(
     section_size_type view_size)
 {
   // leaq foo@tlsld(%rip),%rdi; call __tls_get_addr@plt;
+  // For SIZE == 64:
   // ... leq foo@dtpoff(%rax),%reg
   // ==> .word 0x6666; .byte 0x66; movq %fs:0,%rax ... leaq x@tpoff(%rax),%rdx
+  // For SIZE == 32:
+  // ... leq foo@dtpoff(%rax),%reg
+  // ==> nopl 0x0(%rax); movl %fs:0,%eax ... leaq x@tpoff(%rax),%rdx
 
   tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, -3);
   tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 9);
@@ -3976,7 +3980,10 @@ Target_x86_64<size>::Relocate::tls_ld_to_le(
 
   tls::check_tls(relinfo, relnum, rela.get_r_offset(), view[4] == 0xe8);
 
-  memcpy(view - 3, "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0\0", 12);
+  if (size == 64)
+    memcpy(view - 3, "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0\0", 12);
+  else
+    memcpy(view - 3, "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0\0", 12);
 
   // The next reloc should be a PLT32 reloc against __tls_get_addr.
   // We can skip it.
This page took 0.045618 seconds and 4 git commands to generate.