x86/asm/entry: Create and use a 'TOP_OF_KERNEL_STACK_PADDING' macro
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ec1c93588cefd0c4e3a4705c6d966e2555ba95aa..88d9aa745898c64d790cdf0ab0a976f3c9592b38 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -282,7 +282,11 @@ struct tss_struct {
 
 } ____cacheline_aligned;
 
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
+
+#ifdef CONFIG_X86_32
+DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
+#endif
 
 /*
  * Save the original ist values for checking stack pointers during debugging
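This first hunk does two things: the per-CPU TSS is renamed from init_tss to cpu_tss, since it is the live per-CPU hardware TSS rather than mere initial state, and 32-bit gains cpu_current_top_of_stack, a per-CPU cache of the current task's stack top. A minimal sketch of the declare/define/read pattern behind such a variable, using only the standard percpu API (the real definition and its context-switch update live in other files, outside this diff):

    /*
     * Sketch of the per-CPU pattern used above (standard
     * <linux/percpu.h> API; the variable name matches this hunk).
     */
    #include <linux/percpu.h>

    /* In the header, visible to all users: */
    DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);

    /* In exactly one .c file; the entry/scheduler code updates it
     * on every context switch: */
    DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack);

    /* Readers fetch this CPU's copy: */
    static inline unsigned long top_of_stack_example(void)
    {
            return this_cpu_read(cpu_current_top_of_stack);
    }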
@@ -564,6 +568,16 @@ static inline void native_swapgs(void)
 #endif
 }
 
+static inline unsigned long current_top_of_stack(void)
+{
+#ifdef CONFIG_X86_64
+       return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
+#else
+       /* sp0 on x86_32 is special in and around vm86 mode. */
+       return this_cpu_read_stable(cpu_current_top_of_stack);
+#endif
+}
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
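current_top_of_stack() hides where the authoritative "top of kernel stack" lives: on 64-bit, sp0 in the TSS always points there, while on 32-bit sp0 is biased in and around vm86 mode, so the new per-CPU variable is read instead. this_cpu_read_stable() additionally tells the compiler the value will not change within a function, so repeated calls can be folded into one read. A hypothetical consumer, not part of this patch, showing typical usage:

    /*
     * Hypothetical helper, for illustration only: test whether an
     * address lies on the current task's kernel stack. Assumes
     * THREAD_SIZE is the kernel stack size, as elsewhere in this
     * header.
     */
    static inline bool addr_on_current_stack(unsigned long addr)
    {
            unsigned long top = current_top_of_stack();

            return addr < top && addr >= top - THREAD_SIZE;
    }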
@@ -761,10 +775,10 @@ extern char                       ignore_fpu_irq;
 #define ARCH_HAS_SPINLOCK_PREFETCH
 
 #ifdef CONFIG_X86_32
-# define BASE_PREFETCH         ASM_NOP4
+# define BASE_PREFETCH         ""
 # define ARCH_HAS_PREFETCH
 #else
-# define BASE_PREFETCH         "prefetcht0 (%1)"
+# define BASE_PREFETCH         "prefetcht0 %P1"
 #endif
 
 /*
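BASE_PREFETCH changes in two ways. On 32-bit the fallback becomes an empty string: the alternatives patcher pads the site with NOPs of the right length automatically, so a hand-picked ASM_NOP4 is redundant. On 64-bit (and in both functions below) the operand form moves from an address in a register, "r" with "(%1)", to a memory operand, "m" with "%P1", letting the compiler encode the full addressing mode into the instruction instead of spending a register on the address. A sketch of that constraint difference in isolation, outside the alternatives machinery:

    /* With "r", the compiler must first materialize the address in
     * a register: */
    static inline void prefetch_reg(const void *p)
    {
            asm volatile("prefetcht0 (%0)" : : "r" (p));
    }

    /* With "m" plus the %P modifier, the full addressing mode
     * (base + index + displacement) is emitted directly and no
     * scratch register is needed: */
    static inline void prefetch_mem(const void *p)
    {
            asm volatile("prefetcht0 %P0" : : "m" (*(const char *)p));
    }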
@@ -775,10 +789,9 @@ extern char                        ignore_fpu_irq;
  */
 static inline void prefetch(const void *x)
 {
-       alternative_input(BASE_PREFETCH,
-                         "prefetchnta (%1)",
+       alternative_input(BASE_PREFETCH, "prefetchnta %P1",
                          X86_FEATURE_XMM,
-                         "r" (x));
+                         "m" (*(const char *)x));
 }
 
 /*
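prefetch() selects between the two instructions at boot: the kernel is built with BASE_PREFETCH at the call site, and on CPUs advertising X86_FEATURE_XMM (SSE) the alternatives patcher rewrites it in place to prefetchnta. The canonical use is hiding the latency of the next node while working on the current one; an illustrative sketch (struct item and process() are invented for the example):

    struct item {
            struct item *next;
            int payload;
    };

    static void process(struct item *p);    /* hypothetical work */

    static void walk(struct item *head)
    {
            struct item *p;

            for (p = head; p; p = p->next) {
                    /* Start pulling the next node into cache now;
                     * a prefetch is only a hint and never faults. */
                    if (p->next)
                            prefetch(p->next);
                    process(p);
            }
    }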
@@ -788,10 +801,9 @@ static inline void prefetch(const void *x)
  */
 static inline void prefetchw(const void *x)
 {
-       alternative_input(BASE_PREFETCH,
-                         "prefetchw (%1)",
-                         X86_FEATURE_3DNOW,
-                         "r" (x));
+       alternative_input(BASE_PREFETCH, "prefetchw %P1",
+                         X86_FEATURE_3DNOWPREFETCH,
+                         "m" (*(const char *)x));
 }
 
 static inline void spin_lock_prefetch(const void *x)
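prefetchw() now tests X86_FEATURE_3DNOWPREFETCH instead of X86_FEATURE_3DNOW: the PREFETCHW instruction is advertised by its own CPUID bit (leaf 0x80000001, ECX bit 8, "3DNowPrefetch"), which some non-AMD CPUs also set, whereas the 3DNow! bit is AMD-only, so keying on the precise bit enables the write-intent prefetch on more hardware. A user-space sketch of that CPUID bit (the kernel reads it through its cpufeature tables, not like this):

    #include <cpuid.h>

    /* Check CPUID.80000001H:ECX bit 8 (3DNowPrefetch), the bit that
     * X86_FEATURE_3DNOWPREFETCH corresponds to. */
    static int cpu_has_prefetchw(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
                    return 0;
            return (ecx >> 8) & 1;
    }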
@@ -815,22 +827,6 @@ static inline void spin_lock_prefetch(const void *x)
        .io_bitmap_ptr          = NULL,                                   \
 }
 
-/*
- * Note that the .io_bitmap member must be extra-big. This is because
- * the CPU will access an additional byte beyond the end of the IO
- * permission bitmap. The extra byte must be all 1 bits, and must
- * be within the limit.
- */
-#define INIT_TSS  {                                                      \
-       .x86_tss = {                                                      \
-               .sp0            = sizeof(init_stack) + (long)&init_stack, \
-               .ss0            = __KERNEL_DS,                            \
-               .ss1            = __KERNEL_CS,                            \
-               .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,               \
-        },                                                               \
-       .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },       \
-}
-
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
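With the rename, the 32-bit INIT_TSS macro (and its 64-bit twin further down) has no remaining user: the initializer moves to the single definition site of cpu_tss. Roughly how the folded definition looks there, as a sketch rather than a verbatim quote of that other file; the field values mirror the removed macro, including the removed comment's requirement that the CPU may read one byte past the end of the I/O bitmap, which must be all 1 bits:

    __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
            .x86_tss = {
                    .sp0 = (unsigned long)&init_stack + sizeof(init_stack),
    #ifdef CONFIG_X86_32
                    .ss0 = __KERNEL_DS,
                    .ss1 = __KERNEL_CS,
                    .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
    #endif
            },
    #ifdef CONFIG_X86_32
            /* Extra-big on purpose: the CPU reads one byte beyond
             * the bitmap end, and that byte must be all 1s. */
            .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
    #endif
    };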
@@ -853,7 +849,8 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 #define task_pt_regs(task)                                             \
 ({                                                                     \
        struct pt_regs *__regs__;                                       \
-       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
+       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task)) - \
+                                    TOP_OF_KERNEL_STACK_PADDING);     \
        __regs__ - 1;                                                   \
 })
 
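This is the headline change: the magic "- 8" in task_pt_regs() becomes TOP_OF_KERNEL_STACK_PADDING, the number of unused bytes the 32-bit entry code leaves at the very top of the kernel stack above the struct pt_regs save area; on 64-bit there is no gap. A sketch of the macro this patch introduces in <asm/thread_info.h> (the exact conditional structure is hedged here):

    #ifdef CONFIG_X86_32
    # define TOP_OF_KERNEL_STACK_PADDING 8  /* unused bytes at the top */
    #else
    # define TOP_OF_KERNEL_STACK_PADDING 0  /* pt_regs is flush with the top */
    #endif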
@@ -889,10 +886,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
        .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
 }
 
-#define INIT_TSS  { \
-       .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-}
-
 /*
  * Return saved PC of a blocked thread.
  * What is this good for? it will be always the scheduler or ret_from_fork.
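The 64-bit INIT_TSS removal mirrors the 32-bit one above. One loose end worth noting: INIT_THREAD, visible as context in the previous hunk, still computes the initial stack top by hand; the same series expresses this through a helper so the padding is applied in exactly one place. An approximate sketch of that companion macro:

    /* Companion helper (sketch, may differ in detail): the initial
     * task's stack top with the same padding applied, keeping
     * INIT_THREAD and task_pt_regs() in agreement by construction. */
    #define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
                               TOP_OF_KERNEL_STACK_PADDING)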