/* thread_info.h: low-level thread information
 *
 * Copyright (C) 2002  David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */

#ifndef _ASM_X86_THREAD_INFO_H
#define _ASM_X86_THREAD_INFO_H

#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/types.h>

/*
 * TOP_OF_KERNEL_STACK_PADDING is a number of unused bytes that we
 * reserve at the top of the kernel stack.  We do it because of a nasty
 * 32-bit corner case.  On x86_32, the hardware stack frame is
 * variable-length.  Except for vm86 mode, struct pt_regs assumes a
 * maximum-length frame.  If we enter from CPL 0, the top 8 bytes of
 * pt_regs don't actually exist.  Ordinarily this doesn't matter, but it
 * does in at least one case:
 *
 * If we take an NMI early enough in SYSENTER, then we can end up with
 * pt_regs that extends above sp0.  On the way out, in the espfix code,
 * we can read the saved SS value, but that value will be above sp0.
 * Without this offset, that can result in a page fault.  (We are
 * careful that, in this case, the value we read doesn't matter.)
 *
 * In vm86 mode, the hardware frame is much longer still, so add 16
 * bytes to make room for the real-mode segments.
 *
 * x86_64 has a fixed-length stack frame.
 */
#ifdef CONFIG_X86_32
# ifdef CONFIG_VM86
#  define TOP_OF_KERNEL_STACK_PADDING 16
# else
#  define TOP_OF_KERNEL_STACK_PADDING 8
# endif
#else
# define TOP_OF_KERNEL_STACK_PADDING 0
#endif
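
/*
 * Rough sketch of how this constant is consumed elsewhere: the saved
 * user registers live just below the padding, so code along the lines
 * of task_pt_regs() in <asm/processor.h> locates them as
 *
 *	unsigned long top = (unsigned long)task_stack_page(task) +
 *			    THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
 *	struct pt_regs *regs = (struct pt_regs *)top - 1;
 *
 * i.e. the padding simply pulls the pt_regs area down from the very top
 * of the stack by 0, 8 or 16 bytes depending on the configuration.
 */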

/*
 * low level task data that entry.S needs immediate access to
 * - this struct should fit entirely inside of one cache line
 * - this struct shares the supervisor stack pages
 */
#ifndef __ASSEMBLY__
struct task_struct;
#include <asm/processor.h>
#include <linux/atomic.h>

struct thread_info {
	struct task_struct	*task;		/* main task structure */
	__u32			flags;		/* low level flags */
	__u32			status;		/* thread synchronous flags */
	__u32			cpu;		/* current CPU */
	mm_segment_t		addr_limit;
	unsigned int		sig_on_uaccess_error:1;
	unsigned int		uaccess_err:1;	/* uaccess failed */
};

#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.flags		= 0,			\
	.cpu		= 0,			\
	.addr_limit	= KERNEL_DS,		\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

#else /* !__ASSEMBLY__ */

#include <asm/asm-offsets.h>

#endif
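
/*
 * For C code, the usual way to reach these fields is through
 * current_thread_info() (defined further down) and the generic helpers
 * in <linux/thread_info.h>, which operate on the ->flags word.  A
 * sketch:
 *
 *	if (test_thread_flag(TIF_SIGPENDING))
 *		handle_pending_signal();
 *	set_thread_flag(TIF_NEED_RESCHED);
 *
 * (handle_pending_signal() is only an illustrative placeholder; the
 * TIF_* bits themselves are defined below.)
 */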

/*
 * thread information flags
 * - these are process state flags that various assembly files
 *   may need to access
 * - pending work-to-be-done flags are in LSW
 * - other flags in MSW
 * Warning: layout of LSW is hardcoded in entry.S
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
#define TIF_SIGPENDING		2	/* signal pending */
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
#define TIF_SINGLESTEP		4	/* reenable singlestep on user return */
#define TIF_SYSCALL_EMU		6	/* syscall emulation active */
#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
#define TIF_SECCOMP		8	/* secure computing */
#define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
#define TIF_UPROBE		12	/* breakpointed or singlestepping */
#define TIF_NOTSC		16	/* TSC is not accessible in userland */
#define TIF_IA32		17	/* IA32 compatibility process */
#define TIF_FORK		18	/* ret_from_fork */
#define TIF_NOHZ		19	/* in adaptive nohz mode */
#define TIF_MEMDIE		20	/* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG	21	/* idle is polling for TIF_NEED_RESCHED */
#define TIF_IO_BITMAP		22	/* uses I/O bitmap */
#define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
#define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
#define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
#define TIF_SYSCALL_TRACEPOINT	28	/* syscall tracepoint instrumentation */
#define TIF_ADDR32		29	/* 32-bit address space on 64 bits */
#define TIF_X32			30	/* 32-bit native x86-64 binary */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE		(1 << TIF_UPROBE)
#define _TIF_NOTSC		(1 << TIF_NOTSC)
#define _TIF_IA32		(1 << TIF_IA32)
#define _TIF_FORK		(1 << TIF_FORK)
#define _TIF_NOHZ		(1 << TIF_NOHZ)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
#define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
#define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
#define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_ADDR32		(1 << TIF_ADDR32)
#define _TIF_X32		(1 << TIF_X32)

/* work to do in syscall_trace_enter() */
#define _TIF_WORK_SYSCALL_ENTRY	\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT |	\
	 _TIF_NOHZ)

/* work to do on any return to user space */
#define _TIF_ALLWORK_MASK						\
	((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT |	\
	 _TIF_NOHZ)
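
/*
 * The entry code tests a whole mask against ->flags in one instruction,
 * roughly like this (the exact operand form and branch target differ
 * between the entry paths):
 *
 *	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
 *	jnz	tracesys	# take the slow, traced syscall path
 */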

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW							\
	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)

#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)

#define STACK_WARN		(THREAD_SIZE/8)

/*
 * macros/functions for gaining access to the thread information structure
 *
 * preempt_count needs to be 1 initially, until the scheduler is functional.
 */
#ifndef __ASSEMBLY__

static inline struct thread_info *current_thread_info(void)
{
	return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
}

static inline unsigned long current_stack_pointer(void)
{
	unsigned long sp;
#ifdef CONFIG_X86_64
	asm("mov %%rsp,%0" : "=g" (sp));
#else
	asm("mov %%esp,%0" : "=g" (sp));
#endif
	return sp;
}
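
/*
 * Sketch of the kind of stack-headroom check the irq code builds from
 * these helpers, assuming a THREAD_SIZE-aligned stack (the warning
 * message is illustrative only):
 *
 *	unsigned long left = current_stack_pointer() & (THREAD_SIZE - 1);
 *
 *	if (left < sizeof(struct thread_info) + STACK_WARN)
 *		pr_warn("stack nearly exhausted\n");
 *
 * Once the stack pointer comes within STACK_WARN bytes of the
 * thread_info at the bottom of the stack, we are about to overflow.
 */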

#else /* !__ASSEMBLY__ */

#ifdef CONFIG_X86_64
# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
#endif

/* Load thread_info address into "reg" */
#define GET_THREAD_INFO(reg) \
	_ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
	_ASM_SUB $(THREAD_SIZE),reg ;
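
/*
 * Typical assembly use, sketched: load the thread_info address into a
 * scratch register and access its fields via the <asm/asm-offsets.h>
 * constants (%r11 is only an example register):
 *
 *	GET_THREAD_INFO(%r11)
 *	orl	$TS_COMPAT, TI_status(%r11)
 */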

/*
 * ASM operand which evaluates to a 'thread_info' address of
 * the current task, if it is known that "reg" is exactly "off"
 * bytes below the top of the stack currently.
 *
 * ( The kernel stack's size is known at build time, it is usually
 *   2 or 4 pages, and the bottom of the kernel stack contains
 *   the thread_info structure.  So to access the thread_info very
 *   quickly from assembly code we can calculate down from the
 *   top of the kernel stack to the bottom, using constant,
 *   build-time calculations only. )
 *
 * For example, to fetch the current thread_info->flags value into %eax
 * on x86-64 defconfig kernels, in syscall entry code where RSP is
 * currently at exactly SIZEOF_PTREGS bytes away from the top of the
 * stack:
 *
 *	mov ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS), %eax
 *
 * will translate to:
 *
 *	8b 84 24 b8 c0 ff ff	mov    -0x3f48(%rsp), %eax
 *
 * which is below the current RSP by almost 16K.
 */
#define ASM_THREAD_INFO(field, reg, off) ((field)+(off)-THREAD_SIZE)(reg)

#endif

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT) */
#define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */

#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK	1
static inline void set_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	ti->status |= TS_RESTORE_SIGMASK;
	WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
}
static inline void clear_restore_sigmask(void)
{
	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}
static inline bool test_restore_sigmask(void)
{
	return current_thread_info()->status & TS_RESTORE_SIGMASK;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	if (!(ti->status & TS_RESTORE_SIGMASK))
		return false;
	ti->status &= ~TS_RESTORE_SIGMASK;
	return true;
}
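
/*
 * These helpers back the saved-sigmask protocol: a caller such as
 * sigsuspend() stashes the old mask in current->saved_sigmask, installs
 * the temporary mask and calls set_restore_sigmask(); the signal
 * delivery/return path then undoes it, roughly as the generic
 * restore_saved_sigmask() does:
 *
 *	if (test_and_clear_restore_sigmask())
 *		__set_current_blocked(&current->saved_sigmask);
 */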

static inline bool is_ia32_task(void)
{
#ifdef CONFIG_X86_32
	return true;
#endif
#ifdef CONFIG_IA32_EMULATION
	if (current_thread_info()->status & TS_COMPAT)
		return true;
#endif
	return false;
}

/*
 * Force syscall return via IRET by making it look as if there was
 * some work pending. IRET is our most capable (but slowest) syscall
 * return path, which is able to restore modified SS, CS and certain
 * EFLAGS values that other (fast) syscall return instructions
 * are not able to restore properly.
 */
#define force_iret() set_thread_flag(TIF_NOTIFY_RESUME)
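
/*
 * Illustrative use, as a sketch: a path that rewrites the saved segment
 * state in pt_regs (sigreturn, for example) follows that up with
 *
 *	force_iret();
 *
 * so that the exit-to-user code takes the IRET path and really reloads
 * SS/CS instead of returning through a fast SYSRET/SYSEXIT.
 */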

#endif /* !__ASSEMBLY__ */

#ifndef __ASSEMBLY__
extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
#endif
#endif /* _ASM_X86_THREAD_INFO_H */