c49c5173158e743b985c4e29f09f040811db6a48
[deliverable/linux.git] / arch / x86 / include / asm / fpu / types.h
1 /*
2 * FPU data structures:
3 */
4 #ifndef _ASM_X86_FPU_H
5 #define _ASM_X86_FPU_H
6
/*
 * The legacy x87 FPU state format, as saved by the FSAVE and
 * restored by the FRSTOR instructions.
 *
 * Note: the control/status/tag registers are 16 bits wide in hardware,
 * but FSAVE stores each of them in a full 32-bit memory slot, which is
 * why every field below is a u32.
 */
struct fregs_state {
	u32			cwd;	/* FPU Control Word */
	u32			swd;	/* FPU Status Word */
	u32			twd;	/* FPU Tag Word */
	u32			fip;	/* FPU IP Offset */
	u32			fcs;	/* FPU IP Selector */
	u32			foo;	/* FPU Operand Pointer Offset */
	u32			fos;	/* FPU Operand Pointer Selector */

	/* 8 registers (ST0-ST7), 10 bytes each = 80 bytes: */
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]: */
	u32			status;
};
26
/*
 * The legacy fx SSE/MMX FPU state format, as saved by the FXSAVE and
 * restored by the FXRSTOR instructions. It's similar to the FSAVE
 * format, but differs in some areas, plus has extensions at
 * the end for the XMM registers.
 *
 * The 16-byte alignment is mandated by the FXSAVE/FXRSTOR instructions.
 */
struct fxregs_state {
	u16			cwd;	/* Control Word */
	u16			swd;	/* Status Word */
	u16			twd;	/* Tag Word */
	u16			fop;	/* Last Instruction Opcode */
	union {
		/* 64-bit layout of the IP/operand pointer area: */
		struct {
			u64	rip;	/* Instruction Pointer */
			u64	rdp;	/* Data Pointer */
		};
		/* 32-bit layout of the same 16 bytes: */
		struct {
			u32	fip;	/* FPU IP Offset */
			u32	fcs;	/* FPU IP Selector */
			u32	foo;	/* FPU Operand Offset */
			u32	fos;	/* FPU Operand Selector */
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask */

	/* 8 FP registers, 16 bytes each = 128 bytes: */
	u32			st_space[32];

	/* 16 XMM registers, 16 bytes each = 256 bytes: */
	u32			xmm_space[64];

	u32			padding[12];

	/* The final 48 bytes are software-usable; hardware ignores them: */
	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));
67
/*
 * Default value for fxregs_state.mxcsr: all SIMD FP exceptions
 * masked (bits 7-12 set), round-to-nearest, no flags set.
 */
#define MXCSR_DEFAULT		0x1f80
70
/*
 * Software based FPU emulation state, used by the math-emu code.
 * This is arbitrary really, it matches the x87 format to make
 * it easier to understand:
 */
struct swregs_state {
	u32			cwd;	/* emulated FPU Control Word */
	u32			swd;	/* emulated FPU Status Word */
	u32			twd;	/* emulated FPU Tag Word */
	u32			fip;	/* emulated FPU IP Offset */
	u32			fcs;	/* emulated FPU IP Selector */
	u32			foo;	/* emulated FPU Operand Offset */
	u32			fos;	/* emulated FPU Operand Selector */
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;		/* NOTE(review): presumably the top-of-stack index — confirm against math-emu */
	u8			changed;	/* emulator-internal flags below; semantics live in the math-emu code */
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;	/* EIP of the instruction being emulated */
};
94
/*
 * List of XSAVE features Linux knows about.
 *
 * Each enumerator is the bit position of one XSAVE feature in the
 * XCR0 / xstate_header.xfeatures bitmaps; the values are fixed by
 * the hardware XSAVE architecture, so they are spelled out
 * explicitly here:
 */
enum xfeature_bit {
	XSTATE_BIT_FP		= 0,	/* legacy x87 FPU state */
	XSTATE_BIT_SSE		= 1,	/* XMM registers + MXCSR */
	XSTATE_BIT_YMM		= 2,	/* AVX: high halves of the YMM regs */
	XSTATE_BIT_BNDREGS	= 3,	/* MPX: bound registers */
	XSTATE_BIT_BNDCSR	= 4,	/* MPX: bound config/status */
	XSTATE_BIT_OPMASK	= 5,	/* AVX-512: opmask registers */
	XSTATE_BIT_ZMM_Hi256	= 6,	/* AVX-512: upper 256 bits of ZMM0-15 */
	XSTATE_BIT_Hi16_ZMM	= 7,	/* AVX-512: ZMM16-31 */

	XFEATURES_NR_MAX	= 8,	/* number of features Linux knows about */
};
110
/* Single-feature bitmask values, derived from the xfeature_bit positions: */
#define XSTATE_FP		(1 << XSTATE_BIT_FP)
#define XSTATE_SSE		(1 << XSTATE_BIT_SSE)
#define XSTATE_YMM		(1 << XSTATE_BIT_YMM)
#define XSTATE_BNDREGS		(1 << XSTATE_BIT_BNDREGS)
#define XSTATE_BNDCSR		(1 << XSTATE_BIT_BNDCSR)
#define XSTATE_OPMASK		(1 << XSTATE_BIT_OPMASK)
#define XSTATE_ZMM_Hi256	(1 << XSTATE_BIT_ZMM_Hi256)
#define XSTATE_Hi16_ZMM		(1 << XSTATE_BIT_Hi16_ZMM)

/* Convenience combinations: legacy FP+SSE, and the three AVX-512 components: */
#define XSTATE_FPSSE		(XSTATE_FP | XSTATE_SSE)
#define XSTATE_AVX512		(XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
122
/*
 * There are 16x 256-bit AVX registers named YMM0-YMM15.
 * The low 128 bits are aliased to the 16 SSE registers (XMM0-XMM15)
 * and are stored in 'struct fxregs_state::xmm_space[]' in the
 * legacy area of the XSAVE buffer.
 *
 * The high 128 bits are stored here:
 * 16x 128 bits == 256 bytes.
 */
struct ymmh_struct {
	u8				ymmh_space[256];
};
134
/*
 * AMD Lightweight Profiling xstate component. We don't support LWP yet,
 * this only reserves its space in the XSAVE buffer:
 */
struct lwp_struct {
	u8				reserved[128];
};
139
/*
 * Intel MPX support: one bound register (BNDx), as laid out in the
 * XSAVE area. __packed because this must match the hardware format
 * exactly, with no compiler-inserted padding:
 */
struct bndreg {
	u64				lower_bound;
	u64				upper_bound;
} __packed;
145
/* MPX bound configuration (user) and bound status registers: */
struct bndcsr {
	u64				bndcfgu;
	u64				bndstatus;
} __packed;
150
/* The full MPX state: the four bound registers plus config/status: */
struct mpx_struct {
	struct bndreg			bndreg[4];
	struct bndcsr			bndcsr;
};
155
/*
 * The 64-byte XSAVE header that follows the legacy fxregs area in an
 * XSAVE buffer. 'xfeatures' is the bitmap of state components present
 * in the buffer, 'xcomp_bv' the compacted-format feature bitmap.
 * Packed: this is a hardware-defined layout.
 */
struct xstate_header {
	u64				xfeatures;
	u64				xcomp_bv;
	u64				reserved[6];
} __attribute__((packed));
161
/*
 * Total space reserved for the extended-state components we know about.
 * New processor state extensions should be added here:
 */
#define XSTATE_RESERVE		(sizeof(struct ymmh_struct) + \
				 sizeof(struct lwp_struct) + \
				 sizeof(struct mpx_struct) )
/*
 * This is our most modern FPU state format, as saved by the XSAVE
 * and restored by the XRSTOR instructions.
 *
 * It consists of a legacy fxregs portion, an xstate header and
 * subsequent fixed size areas as defined by the xstate header.
 * Not all CPUs support all the extensions.
 *
 * The 64-byte alignment is required by the XSAVE/XRSTOR instructions.
 */
struct xregs_state {
	struct fxregs_state		i387;
	struct xstate_header		header;
	u8				__reserved[XSTATE_RESERVE];
} __attribute__ ((packed, aligned (64)));
179
/*
 * This is a union of all the possible FPU state formats
 * put together, so that we can pick the right one runtime.
 *
 * The size of the structure is determined by the largest
 * member - which is the xsave area. The __padding member makes
 * the union at least PAGE_SIZE bytes in any case:
 */
union fpregs_state {
	struct fregs_state		fsave;
	struct fxregs_state		fxsave;
	struct swregs_state		soft;
	struct xregs_state		xsave;
	u8 __padding[PAGE_SIZE];
};
194
/*
 * Highest level per task FPU state data structure that
 * contains the FPU register state plus various FPU
 * state fields:
 */
struct fpu {
	/*
	 * @last_cpu:
	 *
	 * Records the last CPU on which this context was loaded into
	 * FPU registers. (In the lazy-restore case we might be
	 * able to reuse FPU registers across multiple context switches
	 * this way, if no intermediate task used the FPU.)
	 *
	 * A value of -1 is used to indicate that the FPU state in context
	 * memory is newer than the FPU state in registers, and that the
	 * FPU state should be reloaded next time the task is run.
	 * (Stored in an unsigned int, so -1 compares as (unsigned)-1.)
	 */
	unsigned int			last_cpu;

	/*
	 * @fpstate_active:
	 *
	 * This flag indicates whether this context is active: if the task
	 * is not running then we can restore from this context, if the task
	 * is running then we should save into this context.
	 */
	unsigned char			fpstate_active;

	/*
	 * @fpregs_active:
	 *
	 * This flag determines whether a given context is actively
	 * loaded into the FPU's registers and that those registers
	 * represent the task's current FPU state.
	 *
	 * Note the interaction with fpstate_active:
	 *
	 *   # task does not use the FPU:
	 *   fpstate_active == 0
	 *
	 *   # task uses the FPU and regs are active:
	 *   fpstate_active == 1 && fpregs_active == 1
	 *
	 *   # the regs are inactive but still match fpstate:
	 *   fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
	 *
	 * The third state is what we use for the lazy restore optimization
	 * on lazy-switching CPUs.
	 */
	unsigned char			fpregs_active;

	/*
	 * @counter:
	 *
	 * This counter contains the number of consecutive context switches
	 * during which the FPU stays used. If this is over a threshold, the
	 * lazy FPU restore logic becomes eager, to save the trap overhead.
	 * This is an unsigned char so that after 256 iterations the counter
	 * wraps and the context switch behavior turns lazy again; this is to
	 * deal with bursty apps that only use the FPU for a short time:
	 */
	unsigned char			counter;

	/*
	 * @state:
	 *
	 * In-memory copy of all FPU registers that we save/restore
	 * over context switches. If the task is using the FPU then
	 * the registers in the FPU are more recent than this state
	 * copy. If the task context-switches away then they get
	 * saved here and represent the FPU state.
	 *
	 * After context switches there may be a (short) time period
	 * during which the in-FPU hardware registers are unchanged
	 * and still perfectly match this state, if the tasks
	 * scheduled afterwards are not using the FPU.
	 *
	 * This is the 'lazy restore' window of optimization, which
	 * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
	 *
	 * We detect whether a subsequent task uses the FPU via setting
	 * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
	 *
	 * During this window, if the task gets scheduled again, we
	 * might be able to skip having to do a restore from this
	 * memory buffer to the hardware registers - at the cost of
	 * incurring the overhead of #NM fault traps.
	 *
	 * Note that on modern CPUs that support the XSAVEOPT (or other
	 * optimized XSAVE instructions), we don't use #NM traps anymore,
	 * as the hardware can track whether FPU registers need saving
	 * or not. On such CPUs we activate the non-lazy ('eagerfpu')
	 * logic, which unconditionally saves/restores all FPU state
	 * across context switches. (if FPU state exists.)
	 */
	union fpregs_state		state;
	/*
	 * WARNING: 'state' is dynamically-sized. Do not put
	 * anything after it here.
	 */
};
296
297 #endif /* _ASM_X86_FPU_H */
This page took 0.036831 seconds and 4 git commands to generate.