x86/fpu, x86/mm/pkeys: Add PKRU xsave fields and data structures
[deliverable/linux.git] / arch / x86 / include / asm / fpu / types.h
1 /*
2 * FPU data structures:
3 */
4 #ifndef _ASM_X86_FPU_H
5 #define _ASM_X86_FPU_H
6
/*
 * The legacy x87 FPU state format, as saved by the FSAVE and
 * restored by the FRSTOR instructions.
 *
 * The layout is hardware-defined; do not reorder or resize fields:
 */
struct fregs_state {
	u32			cwd;	/* FPU Control Word		*/
	u32			swd;	/* FPU Status Word		*/
	u32			twd;	/* FPU Tag Word			*/
	u32			fip;	/* FPU IP Offset		*/
	u32			fcs;	/* FPU IP Selector		*/
	u32			foo;	/* FPU Operand Pointer Offset	*/
	u32			fos;	/* FPU Operand Pointer Selector	*/

	/*
	 * 8*10 bytes for each FP-reg = 80 bytes:
	 * (the 80-bit ST(i) registers are packed back to back)
	 */
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]: */
	u32			status;
};
26
/*
 * The legacy fx SSE/MMX FPU state format, as saved by the FXSAVE and
 * restored by the FXRSTOR instructions. It's similar to the FSAVE
 * format, but differs in some areas, plus has extensions at
 * the end for the XMM registers.
 *
 * The layout is hardware-defined; do not reorder or resize fields:
 */
struct fxregs_state {
	u16			cwd;	/* Control Word			*/
	u16			swd;	/* Status Word			*/
	u16			twd;	/* Tag Word			*/
	u16			fop;	/* Last Instruction Opcode	*/
	union {
		/* 64-bit view of the instruction/data pointer area: */
		struct {
			u64	rip;	/* Instruction Pointer		*/
			u64	rdp;	/* Data Pointer			*/
		};
		/* 32-bit view of the same 16 bytes: */
		struct {
			u32	fip;	/* FPU IP Offset		*/
			u32	fcs;	/* FPU IP Selector		*/
			u32	foo;	/* FPU Operand Offset		*/
			u32	fos;	/* FPU Operand Selector		*/
		};
	};
	u32			mxcsr;		/* MXCSR Register State	*/
	u32			mxcsr_mask;	/* MXCSR Mask		*/

	/*
	 * 8*16 bytes for each FP-reg = 128 bytes:
	 * (unlike FSAVE, each register slot is padded out to 16 bytes)
	 */
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes: */
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12]; /* software-available bytes of the save area */
	};

} __attribute__((aligned(16)));
67
/*
 * Default value for fxregs_state.mxcsr: the architectural reset value
 * (all SIMD FP exceptions masked, round-to-nearest -- NOTE(review):
 * matches the documented power-on MXCSR value; confirm against the SDM):
 */
#define MXCSR_DEFAULT		0x1f80
70
/*
 * Software based FPU emulation state. This is arbitrary really,
 * it matches the x87 format to make it easier to understand:
 */
struct swregs_state {
	u32			cwd;	/* Control Word			*/
	u32			swd;	/* Status Word			*/
	u32			twd;	/* Tag Word			*/
	u32			fip;	/* IP Offset			*/
	u32			fcs;	/* IP Selector			*/
	u32			foo;	/* Operand Pointer Offset	*/
	u32			fos;	/* Operand Pointer Selector	*/

	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];

	/* Emulator-internal bookkeeping, not part of any hardware format: */
	u8			ftop;		/* presumably the top-of-stack index -- TODO confirm in math-emu */
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};
94
/*
 * List of XSAVE features Linux knows about.
 *
 * The enumerator values are the hardware-defined XSAVE state-component
 * numbers (the bit positions used by the XFEATURE_MASK_* macros below),
 * so they must never be renumbered or reordered.  Make each value
 * explicit so that an accidental insertion cannot silently shift the
 * ABI:
 */
enum xfeature {
	XFEATURE_FP				= 0,
	XFEATURE_SSE				= 1,
	/*
	 * Values above here are "legacy states".
	 * Those below are "extended states".
	 */
	XFEATURE_YMM				= 2,
	XFEATURE_BNDREGS			= 3,
	XFEATURE_BNDCSR				= 4,
	XFEATURE_OPMASK				= 5,
	XFEATURE_ZMM_Hi256			= 6,
	XFEATURE_Hi16_ZMM			= 7,
	XFEATURE_PT_UNIMPLEMENTED_SO_FAR	= 8,
	XFEATURE_PKRU				= 9,

	/* Number of known xfeatures; left implicit so it tracks the last entry: */
	XFEATURE_MAX,
};
116
/*
 * Bit masks for the xfeature numbers above.
 *
 * NOTE(review): these expand to plain 'int' constants, which is fine
 * while the highest feature bit is 9 -- revisit with (1ULL << ...) if
 * a feature past bit 30 is ever added:
 */
#define XFEATURE_MASK_FP		(1 << XFEATURE_FP)
#define XFEATURE_MASK_SSE		(1 << XFEATURE_SSE)
#define XFEATURE_MASK_YMM		(1 << XFEATURE_YMM)
#define XFEATURE_MASK_BNDREGS		(1 << XFEATURE_BNDREGS)
#define XFEATURE_MASK_BNDCSR		(1 << XFEATURE_BNDCSR)
#define XFEATURE_MASK_OPMASK		(1 << XFEATURE_OPMASK)
#define XFEATURE_MASK_ZMM_Hi256		(1 << XFEATURE_ZMM_Hi256)
#define XFEATURE_MASK_Hi16_ZMM		(1 << XFEATURE_Hi16_ZMM)
#define XFEATURE_MASK_PKRU		(1 << XFEATURE_PKRU)

/* The legacy FP+SSE pair, and the three AVX-512 components, as groups: */
#define XFEATURE_MASK_FPSSE		(XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
#define XFEATURE_MASK_AVX512		(XFEATURE_MASK_OPMASK \
					 | XFEATURE_MASK_ZMM_Hi256 \
					 | XFEATURE_MASK_Hi16_ZMM)

/* First feature stored in the XSAVE extended area (after the legacy area): */
#define FIRST_EXTENDED_XFEATURE	XFEATURE_YMM
133
/* Fixed-size register image containers, named by register width in bits: */
struct reg_128_bit {
	u8	regbytes[128/8];
};
struct reg_256_bit {
	u8	regbytes[256/8];
};
struct reg_512_bit {
	u8	regbytes[512/8];
};
143
/*
 * State component 2:
 *
 * There are 16x 256-bit AVX registers named YMM0-YMM15.
 * The low 128 bits are aliased to the 16 SSE registers (XMM0-XMM15)
 * and are stored in 'struct fxregs_state::xmm_space[]' in the
 * "legacy" area.
 *
 * The high 128 bits of YMM0-YMM15 are stored here.
 */
struct ymmh_struct {
	struct reg_128_bit		hi_ymm[16];
} __packed;
157
/* Intel MPX support: */

/* One 128-bit MPX bounds register: a lower and an upper bound: */
struct mpx_bndreg {
	u64				lower_bound;
	u64				upper_bound;
} __packed;
/*
 * State component 3 is used for the 4 128-bit bounds registers
 * (BND0-BND3):
 */
struct mpx_bndreg_state {
	struct mpx_bndreg		bndreg[4];
} __packed;
170
/*
 * State component 4 is used for the 64-bit user-mode MPX
 * configuration register BNDCFGU and the 64-bit MPX status
 * register BNDSTATUS. We call the pair "BNDCSR".
 */
struct mpx_bndcsr {
	u64				bndcfgu;
	u64				bndstatus;
} __packed;
180
/*
 * The BNDCSR state is padded out to be 64-bytes in size:
 * the anonymous union overlays the 16-byte BNDCSR pair with
 * a 64-byte pad so sizeof() the struct is always 64.
 */
struct mpx_bndcsr_state {
	union {
		struct mpx_bndcsr	bndcsr;
		u8			pad_to_64_bytes[64];
	};
} __packed;
190
/* AVX-512 Components: */

/*
 * State component 5 is used for the 8 64-bit opmask registers
 * k0-k7 (opmask state).
 */
struct avx_512_opmask_state {
	u64				opmask_reg[8];
} __packed;
200
/*
 * State component 6 is used for the upper 256 bits of the
 * registers ZMM0-ZMM15. These 16 256-bit values are denoted
 * ZMM0_H-ZMM15_H (ZMM_Hi256 state).
 */
struct avx_512_zmm_uppers_state {
	struct reg_256_bit		zmm_upper[16];
} __packed;
209
/*
 * State component 7 is used for the 16 512-bit registers
 * ZMM16-ZMM31 (Hi16_ZMM state).
 */
struct avx_512_hi16_state {
	struct reg_512_bit		hi16_zmm[16];
} __packed;
217
/*
 * State component 9: 32-bit PKRU register.  The hardware state is
 * 8 bytes long but only 4 bytes are used currently, hence the
 * explicit pad field:
 */
struct pkru_state {
	u32				pkru;
	u32				pad;
} __packed;
226
/*
 * Header of the XSAVE extended region:
 *
 * @xfeatures:	bitmap of the state components present in the buffer
 *		(bit numbers match 'enum xfeature' above)
 * @xcomp_bv:	NOTE(review): presumably the compacted-format component
 *		bitmap used by XSAVES/XRSTORS -- confirm against the SDM
 * @reserved:	must be zero
 */
struct xstate_header {
	u64				xfeatures;
	u64				xcomp_bv;
	u64				reserved[6];
} __attribute__((packed));
232
/*
 * This is our most modern FPU state format, as saved by the XSAVE
 * and restored by the XRSTOR instructions.
 *
 * It consists of a legacy fxregs portion, an xstate header and
 * subsequent areas as defined by the xstate header.  Not all CPUs
 * support all the extensions, so the size of the extended area
 * can vary quite a bit between CPUs.
 */
struct xregs_state {
	struct fxregs_state		i387;
	struct xstate_header		header;
	/*
	 * Dynamically-sized tail.  Kept as a [0] array (GNU extension)
	 * rather than a C99 flexible array member because this struct is
	 * embedded inside 'union fpregs_state' below, where a flexible
	 * array member would be an ISO C constraint violation:
	 */
	u8				extended_state_area[0];
} __attribute__ ((packed, aligned (64)));
247
/*
 * This is a union of all the possible FPU state formats
 * put together, so that we can pick the right one runtime.
 *
 * The size of the structure is determined by the largest
 * member - which is the xsave area. The padding is there
 * to ensure that statically-allocated task_structs (just
 * the init_task today) have enough space for the dynamically
 * sized xsave extended area.
 */
union fpregs_state {
	struct fregs_state		fsave;	/* legacy FSAVE format	*/
	struct fxregs_state		fxsave;	/* FXSAVE format	*/
	struct swregs_state		soft;	/* math-emu software state */
	struct xregs_state		xsave;	/* XSAVE format		*/
	u8 __padding[PAGE_SIZE];
};
264
/*
 * Highest level per task FPU state data structure that
 * contains the FPU register state plus various FPU
 * state fields:
 */
struct fpu {
	/*
	 * @last_cpu:
	 *
	 * Records the last CPU on which this context was loaded into
	 * FPU registers. (In the lazy-restore case we might be
	 * able to reuse FPU registers across multiple context switches
	 * this way, if no intermediate task used the FPU.)
	 *
	 * A value of -1 is used to indicate that the FPU state in context
	 * memory is newer than the FPU state in registers, and that the
	 * FPU state should be reloaded next time the task is run.
	 */
	unsigned int			last_cpu;

	/*
	 * @fpstate_active:
	 *
	 * This flag indicates whether this context is active: if the task
	 * is not running then we can restore from this context, if the task
	 * is running then we should save into this context.
	 */
	unsigned char			fpstate_active;

	/*
	 * @fpregs_active:
	 *
	 * This flag determines whether a given context is actively
	 * loaded into the FPU's registers and that those registers
	 * represent the task's current FPU state.
	 *
	 * Note the interaction with fpstate_active:
	 *
	 *   # task does not use the FPU:
	 *   fpstate_active == 0
	 *
	 *   # task uses the FPU and regs are active:
	 *   fpstate_active == 1 && fpregs_active == 1
	 *
	 *   # the regs are inactive but still match fpstate:
	 *   fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
	 *
	 * The third state is what we use for the lazy restore optimization
	 * on lazy-switching CPUs.
	 */
	unsigned char			fpregs_active;

	/*
	 * @counter:
	 *
	 * This counter contains the number of consecutive context switches
	 * during which the FPU stays used. If this is over a threshold, the
	 * lazy FPU restore logic becomes eager, to save the trap overhead.
	 * This is an unsigned char so that after 256 iterations the counter
	 * wraps and the context switch behavior turns lazy again; this is to
	 * deal with bursty apps that only use the FPU for a short time:
	 */
	unsigned char			counter;

	/*
	 * @state:
	 *
	 * In-memory copy of all FPU registers that we save/restore
	 * over context switches. If the task is using the FPU then
	 * the registers in the FPU are more recent than this state
	 * copy. If the task context-switches away then they get
	 * saved here and represent the FPU state.
	 *
	 * After context switches there may be a (short) time period
	 * during which the in-FPU hardware registers are unchanged
	 * and still perfectly match this state, if the tasks
	 * scheduled afterwards are not using the FPU.
	 *
	 * This is the 'lazy restore' window of optimization, which
	 * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
	 *
	 * We detect whether a subsequent task uses the FPU via setting
	 * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
	 *
	 * During this window, if the task gets scheduled again, we
	 * might be able to skip having to do a restore from this
	 * memory buffer to the hardware registers - at the cost of
	 * incurring the overhead of #NM fault traps.
	 *
	 * Note that on modern CPUs that support the XSAVEOPT (or other
	 * optimized XSAVE instructions), we don't use #NM traps anymore,
	 * as the hardware can track whether FPU registers need saving
	 * or not. On such CPUs we activate the non-lazy ('eagerfpu')
	 * logic, which unconditionally saves/restores all FPU state
	 * across context switches. (if FPU state exists.)
	 */
	union fpregs_state		state;
	/*
	 * WARNING: 'state' is dynamically-sized (the xsave member's
	 * extended area grows past sizeof(union fpregs_state)).
	 * Do not put anything after it here.
	 */
};
366
367 #endif /* _ASM_X86_FPU_H */
This page took 0.039469 seconds and 5 git commands to generate.