Commit | Line | Data |
---|---|---|
f05e798a DH |
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

struct task_struct; /* one of the stranger aspects of C forward declarations */

/*
 * Low-level context-switch entry point, implemented per-arch.  Called from
 * the switch_to() asm below; its return value (the task we actually switched
 * away from) comes back in %eax/%rax and is stored into switch_to()'s 'last'
 * output operand.
 */
__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);
struct tss_struct;
/*
 * Slow-path extra work on context switch (takes the outgoing and incoming
 * tasks plus the CPU's TSS).  Only the prototype is visible here; see the
 * definition for what "extra" covers.
 */
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);
10 | ||
#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Asm fragment spliced into the 32-bit switch_to() below: copy the incoming
 * task's canary (next->stack_canary, reached via the [task_canary] offset
 * and the [next] register operand) into the per-CPU stack_canary slot that
 * -fstack-protector code reads.  %ebx is used as scratch; switch_to() already
 * lists it as a clobbered output.
 */
#define __switch_canary \
	"movl %P[task_canary](%[next]), %%ebx\n\t" \
	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
/* Extra output operand for switch_to(): the per-CPU canary we overwrite. */
#define __switch_canary_oparam \
	, [stack_canary] "=m" (stack_canary.canary)
/* Extra input operand: byte offset of ->stack_canary inside task_struct. */
#define __switch_canary_iparam \
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else /* CC_STACKPROTECTOR */
/* Stack protector disabled: all three helpers expand to nothing. */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif /* CC_STACKPROTECTOR */
26 | ||
/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
/*
 * switch_to(prev, next, last): switch this CPU from task @prev to @next.
 *
 * Mechanism: flags and %ebp are pushed on the old stack, %esp is saved into
 * prev->thread.sp and reloaded from next->thread.sp, the resume address
 * (label 1:) is stored into prev->thread.ip, and next->thread.ip is pushed
 * so that __switch_to()'s ret lands there ("jmp __switch_to" with a pushed
 * return address stands in for a regparm call).
 *
 * Why three parameters: once %esp is switched, every stack local — including
 * the 'prev' argument — belongs to the stack we resumed on, not the task we
 * came from.  __switch_to() therefore returns the task we actually switched
 * away from in %eax, which the "=a" constraint stores into @last.
 */
#define switch_to(prev, next, last)					\
do {									\
	/*								\
	 * Context-switching clobbers all registers, so we clobber	\
	 * them explicitly, via unused output variables.		\
	 * (EAX and EBP is not listed because EBP is saved/restored	\
	 * explicitly for wchan access and EAX is the return value of	\
	 * __switch_to())						\
	 */								\
	unsigned long ebx, ecx, edx, esi, edi;				\
									\
	asm volatile("pushfl\n\t"		/* save    flags */	\
		     "pushl %%ebp\n\t"		/* save    EBP   */	\
		     "movl %%esp,%[prev_sp]\n\t" /* save    ESP   */	\
		     "movl %[next_sp],%%esp\n\t" /* restore ESP   */	\
		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
		     __switch_canary					\
		     "jmp __switch_to\n"	/* regparm call  */	\
		     "1:\t"						\
		     "popl %%ebp\n\t"		/* restore EBP   */	\
		     "popfl\n"			/* restore flags */	\
									\
		     /* output parameters */				\
		     : [prev_sp] "=m" (prev->thread.sp),		\
		       [prev_ip] "=m" (prev->thread.ip),		\
		       "=a" (last),					\
									\
		       /* clobbered output registers: */		\
		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
		       "=S" (esi), "=D" (edi)				\
									\
		       __switch_canary_oparam				\
									\
		       /* input parameters: */				\
		     : [next_sp] "m" (next->thread.sp),			\
		       [next_ip] "m" (next->thread.ip),			\
									\
		       /* regparm parameters for __switch_to(): */	\
		       [prev] "a" (prev),				\
		       [next] "d" (next)				\
									\
		       __switch_canary_iparam				\
									\
		     : /* reloaded segment registers */			\
			"memory");					\
} while (0)
78 | ||
#else /* CONFIG_X86_32 */

/* frame pointer must be last for get_wchan */
/*
 * SAVE_CONTEXT also stashes %rsi (the 'next' argument, per the "S"
 * constraint in switch_to() below) in %rbp across the __switch_to() call;
 * RESTORE_CONTEXT recovers it and pops the saved frame pointer.
 */
#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"

/*
 * Registers additionally trashed across the switch_to() asm.  "flags" is
 * included because eflags is deliberately not saved/restored on 64-bit
 * (see the comment above the 64-bit switch_to() below).
 */
#define __EXTRA_CLOBBER  \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15", "flags"

#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Copy the incoming task's canary (%rsi points at 'next' here, and
 * [task_canary] is the offset of ->stack_canary) into the per-CPU
 * irq_stack_union.stack_canary slot, using %r8 (already in __EXTRA_CLOBBER)
 * as scratch.
 */
#define __switch_canary							  \
	"movq %P[task_canary](%%rsi),%%r8\n\t"				  \
	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
/* Extra output operand for switch_to(): the per-CPU canary we overwrite. */
#define __switch_canary_oparam						  \
	, [gs_canary] "=m" (irq_stack_union.stack_canary)
/* Extra input operand: byte offset of ->stack_canary inside task_struct. */
#define __switch_canary_iparam						  \
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else /* CC_STACKPROTECTOR */
/* Stack protector disabled: all three helpers expand to nothing. */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif /* CC_STACKPROTECTOR */

/*
 * There is no need to save or restore flags, because flags are always
 * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
 * has no effect.
 */
/*
 * 64-bit switch_to(prev, next, last): @prev goes in %rdi and @next in %rsi
 * (the "D"/"S" constraints), which are also the argument registers for the
 * __switch_to() call.  %rsp is saved into prev->thread.sp and reloaded from
 * next->thread.sp around that call; afterwards %rsi is reloaded from the
 * per-CPU current_task so the canary/thread_info loads refer to the task we
 * just switched to.  __switch_to()'s return value in %rax becomes @last, and
 * is also copied to %rdi before the _TIF_FORK test so that, if the incoming
 * task has _TIF_FORK set in its thread_info->flags, control jumps to
 * ret_from_fork with that value as its first argument instead of unwinding
 * through RESTORE_CONTEXT.  NOTE(review): [thread_info] is the offset of
 * task_struct.stack — presumably thread_info lives at the base of the task's
 * stack allocation here; confirm against the arch's thread_info layout.
 */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					  \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
	     "call __switch_to\n\t"					  \
	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
	     __switch_canary						  \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
	     "movq %%rax,%%rdi\n\t"					  \
	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
	     "jnz ret_from_fork\n\t"					  \
	     RESTORE_CONTEXT						  \
	     : "=a" (last)						  \
	       __switch_canary_oparam					  \
	     : [next] "S" (next), [prev] "D" (prev),			  \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
	       [_tif_fork] "i" (_TIF_FORK),				  \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),	  \
	       [current_task] "m" (current_task)			  \
	       __switch_canary_iparam					  \
	     : "memory", "cc" __EXTRA_CLOBBER)

#endif /* CONFIG_X86_32 */

#endif /* _ASM_X86_SWITCH_TO_H */