arch/x86/include/asm/fpu/types.h
/*
 * FPU data structures:
 */
#ifndef _ASM_X86_FPU_H
#define _ASM_X86_FPU_H

/*
 * The legacy x87 FPU state format, as saved by FSAVE and
 * restored by the FRSTOR instructions:
 */
struct fregs_state {
        u32                     cwd;    /* FPU Control Word             */
        u32                     swd;    /* FPU Status Word              */
        u32                     twd;    /* FPU Tag Word                 */
        u32                     fip;    /* FPU IP Offset                */
        u32                     fcs;    /* FPU IP Selector              */
        u32                     foo;    /* FPU Operand Pointer Offset   */
        u32                     fos;    /* FPU Operand Pointer Selector */

        /* 8*10 bytes for each FP-reg = 80 bytes: */
        u32                     st_space[20];

        /* Software status information [not touched by FSAVE]: */
        u32                     status;
};
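
/*
 * Illustrative sketch (not part of the original header): the eight
 * 10-byte x87 registers are packed back-to-back into st_space[], so
 * ST(i) starts at byte offset i*10. The helper name is hypothetical:
 */
static inline const void *fregs_st(const struct fregs_state *fp, int i)
{
        /* 10 bytes per register, stored consecutively in st_space[]: */
        return (const u8 *)fp->st_space + 10 * i;
}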

/*
 * The legacy fx SSE/MMX FPU state format, as saved by FXSAVE and
 * restored by the FXRSTOR instructions. It's similar to the FSAVE
 * format, but differs in some areas, plus has extensions at
 * the end for the XMM registers.
 */
struct fxregs_state {
        u16                     cwd;    /* Control Word                 */
        u16                     swd;    /* Status Word                  */
        u16                     twd;    /* Tag Word                     */
        u16                     fop;    /* Last Instruction Opcode      */
        union {
                struct {
                        u64     rip;    /* Instruction Pointer          */
                        u64     rdp;    /* Data Pointer                 */
                };
                struct {
                        u32     fip;    /* FPU IP Offset                */
                        u32     fcs;    /* FPU IP Selector              */
                        u32     foo;    /* FPU Operand Offset           */
                        u32     fos;    /* FPU Operand Selector         */
                };
        };
        u32                     mxcsr;          /* MXCSR Register State */
        u32                     mxcsr_mask;     /* MXCSR Mask           */

        /* 8*16 bytes for each FP-reg = 128 bytes: */
        u32                     st_space[32];

        /* 16*16 bytes for each XMM-reg = 256 bytes: */
        u32                     xmm_space[64];

        u32                     padding[12];

        union {
                u32             padding1[12];
                u32             sw_reserved[12];
        };

} __attribute__((aligned(16)));

/* Default value for fxregs_state.mxcsr: */
#define MXCSR_DEFAULT           0x1f80
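
/*
 * Illustrative sketch (not part of the original header): unlike the
 * FSAVE layout above, FXSAVE pads each 10-byte x87 register out to a
 * 16-byte slot, so ST(i) starts at byte offset i*16 in st_space[],
 * and XMM register i occupies bytes i*16..i*16+15 of xmm_space[].
 * Both helper names below are hypothetical:
 */
static inline const void *fxregs_st(const struct fxregs_state *fx, int i)
{
        return (const u8 *)fx->st_space + 16 * i;       /* 16-byte slots */
}

static inline void fxregs_init_mxcsr(struct fxregs_state *fx)
{
        /* 0x1f80: all SIMD exceptions masked, round-to-nearest: */
        fx->mxcsr = MXCSR_DEFAULT;
}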

/*
 * Software based FPU emulation state. This is arbitrary, really;
 * it matches the x87 format to make it easier to understand:
 */
struct swregs_state {
        u32                     cwd;
        u32                     swd;
        u32                     twd;
        u32                     fip;
        u32                     fcs;
        u32                     foo;
        u32                     fos;
        /* 8*10 bytes for each FP-reg = 80 bytes: */
        u32                     st_space[20];
        u8                      ftop;
        u8                      changed;
        u8                      lookahead;
        u8                      no_update;
        u8                      rm;
        u8                      alimit;
        struct math_emu_info    *info;
        u32                     entry_eip;
};

/*
 * List of XSAVE features Linux knows about:
 */
enum xfeature_bit {
        XSTATE_BIT_FP,
        XSTATE_BIT_SSE,
        XSTATE_BIT_YMM,
        XSTATE_BIT_BNDREGS,
        XSTATE_BIT_BNDCSR,
        XSTATE_BIT_OPMASK,
        XSTATE_BIT_ZMM_Hi256,
        XSTATE_BIT_Hi16_ZMM,

        XFEATURES_NR_MAX,
};

#define XSTATE_FP               (1 << XSTATE_BIT_FP)
#define XSTATE_SSE              (1 << XSTATE_BIT_SSE)
#define XSTATE_YMM              (1 << XSTATE_BIT_YMM)
#define XSTATE_BNDREGS          (1 << XSTATE_BIT_BNDREGS)
#define XSTATE_BNDCSR           (1 << XSTATE_BIT_BNDCSR)
#define XSTATE_OPMASK           (1 << XSTATE_BIT_OPMASK)
#define XSTATE_ZMM_Hi256        (1 << XSTATE_BIT_ZMM_Hi256)
#define XSTATE_Hi16_ZMM         (1 << XSTATE_BIT_Hi16_ZMM)

#define XSTATE_FPSSE            (XSTATE_FP | XSTATE_SSE)
#define XSTATE_AVX512           (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
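
/*
 * Illustrative sketch (not part of the original header): these masks
 * are meant to be tested against an xfeatures bitmap such as the one
 * in struct xstate_header below. The helper name is hypothetical:
 */
static inline int xfeatures_have_avx512(u64 xfeatures)
{
        /* AVX-512 state is usable only if all three components are present: */
        return (xfeatures & XSTATE_AVX512) == XSTATE_AVX512;
}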

/*
 * There are 16x 256-bit AVX registers named YMM0-YMM15.
 * The low 128 bits are aliased to the 16 SSE registers (XMM0-XMM15)
 * and are stored in 'struct fxregs_state::xmm_space[]'.
 *
 * The high 128 bits are stored here:
 *    16x 128 bits == 256 bytes.
 */
struct ymmh_struct {
        u8                      ymmh_space[256];
};
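
/*
 * Illustrative sketch (not part of the original header): a full 256-bit
 * YMM register is split across two save areas - the low 128 bits live
 * in fxregs_state::xmm_space[], the high 128 bits in ymmh_space[].
 * Hypothetical helper, assuming memcpy() is available here:
 */
static inline void ymm_reassemble(const struct fxregs_state *fx,
                                  const struct ymmh_struct *ymmh,
                                  int i, u8 out[32])
{
        memcpy(out,      (const u8 *)fx->xmm_space + 16 * i, 16); /* bits 0..127   */
        memcpy(out + 16, &ymmh->ymmh_space[16 * i],          16); /* bits 128..255 */
}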

/* We don't support LWP yet: */
struct lwp_struct {
        u8                      reserved[128];
};

/* Intel MPX support: */
struct bndreg {
        u64                     lower_bound;
        u64                     upper_bound;
} __packed;

struct bndcsr {
        u64                     bndcfgu;
        u64                     bndstatus;
} __packed;

struct mpx_struct {
        struct bndreg           bndreg[4];
        struct bndcsr           bndcsr;
};
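
/*
 * Illustrative sketch (not part of the original header): MPX hardware
 * stores the upper bound in one's complement form, so a bounds check
 * against a saved bndreg has to invert it first. The helper name is
 * hypothetical:
 */
static inline int mpx_bound_violation(const struct bndreg *bnd, u64 addr)
{
        /* upper_bound is kept inverted (one's complement) by the CPU: */
        return addr < bnd->lower_bound || addr > ~bnd->upper_bound;
}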

struct xstate_header {
        u64                     xfeatures;
        u64                     xcomp_bv;
        u64                     reserved[6];
} __attribute__((packed));
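
/*
 * Illustrative sketch (not part of the original header): bit 63 of
 * xcomp_bv marks a compacted-format save area (as written by XSAVES/
 * XSAVEC); for the standard format assumed by struct xregs_state below
 * it is zero. The helper name is hypothetical:
 */
static inline int xstate_is_compacted(const struct xstate_header *hdr)
{
        return (hdr->xcomp_bv >> 63) & 1;
}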

/* New processor state extensions should be added here: */
#define XSTATE_RESERVE          (sizeof(struct ymmh_struct) + \
                                 sizeof(struct lwp_struct) + \
                                 sizeof(struct mpx_struct))

/*
 * This is our most modern FPU state format, as saved by the XSAVE
 * and restored by the XRSTOR instructions.
 *
 * It consists of a legacy fxregs portion, an xstate header and
 * subsequent fixed size areas as defined by the xstate header.
 * Not all CPUs support all the extensions.
 */
struct xregs_state {
        struct fxregs_state     i387;
        struct xstate_header    header;
        u8                      __reserved[XSTATE_RESERVE];
} __attribute__((packed, aligned(64)));
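
/*
 * Illustrative sketch (not part of the original header): the standard
 * XSAVE layout places the legacy FXSAVE image at offset 0 and the
 * header right after it at offset 512, which is exactly what the
 * packed struct xregs_state encodes. Hypothetical compile-time checks,
 * assuming BUILD_BUG_ON() (<linux/bug.h>) and offsetof() are in scope:
 */
static inline void xregs_state_layout_checks(void)
{
        BUILD_BUG_ON(sizeof(struct fxregs_state) != 512);
        BUILD_BUG_ON(offsetof(struct xregs_state, header) != 512);
}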

/*
 * This is a union of all the possible FPU state formats
 * put together, so that we can pick the right one at runtime.
 *
 * The size of the structure is determined by the largest
 * member - which is the xsave area:
 */
union fpregs_state {
        struct fregs_state      fsave;
        struct fxregs_state     fxsave;
        struct swregs_state     soft;
        struct xregs_state      xsave;
};
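
/*
 * Illustrative sketch (not part of the original header): which union
 * member is valid depends on CPU support - the kernel decides with
 * predicates along the lines of use_xsave()/use_fxsr() in its FPU
 * internals. This simplified, hypothetical helper takes the decision
 * as plain arguments instead:
 */
static inline unsigned int fpregs_size_in_use(int have_xsave, int have_fxsr)
{
        if (have_xsave)
                return sizeof(struct xregs_state);      /* XSAVE/XRSTOR   */
        if (have_fxsr)
                return sizeof(struct fxregs_state);     /* FXSAVE/FXRSTOR */
        return sizeof(struct fregs_state);              /* FSAVE/FRSTOR   */
}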

/*
 * Highest level per task FPU state data structure that
 * contains the FPU register state plus various FPU
 * state fields:
 */
struct fpu {
        /*
         * @state:
         *
         * In-memory copy of all FPU registers that we save/restore
         * over context switches. If the task is using the FPU then
         * the registers in the FPU are more recent than this state
         * copy. If the task context-switches away then they get
         * saved here and represent the FPU state.
         *
         * After context switches there may be a (short) time period
         * during which the in-FPU hardware registers are unchanged
         * and still perfectly match this state, if the tasks
         * scheduled afterwards are not using the FPU.
         *
         * This is the 'lazy restore' window of optimization, which
         * we track through 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'
         * (see the illustrative sketch after this structure).
         *
         * We detect whether a subsequent task uses the FPU by setting
         * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
         *
         * During this window, if the task gets scheduled again, we
         * might be able to skip having to do a restore from this
         * memory buffer to the hardware registers - at the cost of
         * incurring the overhead of #NM fault traps.
         *
         * Note that on modern CPUs that support the XSAVEOPT (or other
         * optimized XSAVE instructions), we don't use #NM traps anymore,
         * as the hardware can track whether FPU registers need saving
         * or not. On such CPUs we activate the non-lazy ('eagerfpu')
         * logic, which unconditionally saves/restores all FPU state
         * across context switches (if FPU state exists).
         */
        union fpregs_state      state;

        /*
         * @last_cpu:
         *
         * Records the last CPU on which this context was loaded into
         * FPU registers. (In the lazy-restore case we might be
         * able to reuse FPU registers across multiple context switches
         * this way, if no intermediate task used the FPU.)
         *
         * A value of -1 is used to indicate that the FPU state in context
         * memory is newer than the FPU state in registers, and that the
         * FPU state should be reloaded next time the task is run.
         */
        unsigned int            last_cpu;

        /*
         * @fpstate_active:
         *
         * This flag indicates whether this context is active: if the task
         * is not running then we can restore from this context, if the task
         * is running then we should save into this context.
         */
        unsigned char           fpstate_active;

        /*
         * @fpregs_active:
         *
         * This flag determines whether a given context is actively
         * loaded into the FPU's registers and that those registers
         * represent the task's current FPU state.
         *
         * Note the interaction with fpstate_active:
         *
         *   # task does not use the FPU:
         *   fpstate_active == 0
         *
         *   # task uses the FPU and regs are active:
         *   fpstate_active == 1 && fpregs_active == 1
         *
         *   # the regs are inactive but still match fpstate:
         *   fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
         *
         * The third state is what we use for the lazy restore optimization
         * on lazy-switching CPUs.
         */
        unsigned char           fpregs_active;

        /*
         * @counter:
         *
         * This counter contains the number of consecutive context switches
         * during which the FPU stays used. If this is over a threshold, the
         * lazy FPU restore logic becomes eager, to save the trap overhead.
         * This is an unsigned char so that after 256 iterations the counter
         * wraps and the context switch behavior turns lazy again; this is to
         * deal with bursty apps that only use the FPU for a short time:
         */
        unsigned char           counter;
};
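
/*
 * Illustrative sketch (not part of the original header): the lazy
 * restore test referenced in the comments above reduces to checking
 * that this context still owns the FPU registers of the current CPU.
 * The kernel reads the per-cpu 'fpu_fpregs_owner_ctx' pointer for
 * this; the hypothetical helper below takes it as an argument:
 */
static inline int fpu_wants_lazy_restore(struct fpu *fpu,
                                         struct fpu *fpregs_owner_ctx,
                                         unsigned int cpu)
{
        /*
         * The registers still hold our state only if no other task
         * touched the FPU on this CPU since we last ran here:
         */
        return fpu == fpregs_owner_ctx && fpu->last_cpu == cpu;
}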

#endif /* _ASM_X86_FPU_H */