/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#ifndef __KERNEL__
#include <stdio.h>
#include <stdint.h>
#include <public/xen.h>
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "x86.h"
#include "tss.h"
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<16)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<17)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<17)	/* Register operand. */
#define DstMem      (3<<17)	/* Memory operand. */
#define DstAcc      (4<<17)	/* Destination Accumulator */
#define DstDI       (5<<17)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<17)	/* 64bit memory operand */
#define DstMask     (7<<17)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcImplicit (0<<4)	/* Source operand is implicit in the opcode. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
#define SrcAcc      (0xd<<4)	/* Source Accumulator */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
#define GroupMask   0x0f	/* Group number stored in bits 0:3 */
/* Misc flags */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Mask    (7<<29)
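
/*
 * Example of how a decode entry ORs these fields together into one u32:
 * ByteOp | DstMem | SrcImm | ModRM is
 *	0x10000 | 0x60000 | 0x50 | 0x100 = 0x70150,
 * i.e. an 8-bit read-modify-write memory destination with an immediate
 * source, requiring a ModRM byte.
 */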
#define X2(x) x, x
#define X3(x) X2(x), x
#define X4(x) X2(x), X2(x)
#define X5(x) X4(x), x
#define X6(x) X4(x), X2(x)
#define X7(x) X4(x), X3(x)
#define X8(x) X4(x), X4(x)
#define X16(x) X8(x), X8(x)
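
/*
 * Example: X16(D(DstReg)) expands to sixteen copies of { .flags = DstReg },
 * which fills a whole row of the one-byte opcode table (for instance the
 * inc/dec-register range 0x40 - 0x4f) with a single line.
 */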
enum {
	NoGrp, Group7, Group8, Group9,
};

struct opcode {
	u32 flags;
	union {
		struct opcode *group;
		struct group_dual *gdual;
	} u;
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

#define D(_y) { .flags = (_y) }
#define N    D(0)
#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
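
/*
 * Example: G(DstMem | SrcImm | ModRM | Group, group1) builds an entry whose
 * flags mark it as a group and whose u.group points at group1[]; the decoder
 * in effect replaces it with group1[(modrm >> 3) & 7], so the reg field of
 * the ModRM byte selects the final sub-opcode.
 */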
static struct opcode group1[] = {
	X7(D(Lock)), N
};

static struct opcode group1A[] = {
	D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
};
static struct opcode group3[] = {
	D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	X4(D(Undefined)),
};
static struct opcode group4[] = {
	D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
	N, N, N, N, N, N,
};
static struct opcode group5[] = {
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	D(SrcMem | ModRM | Stack), N,
	D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
	D(SrcMem | ModRM | Stack), N,
};
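
/*
 * Example: opcode 0xff is decoded through group5[] above, so "push r/m"
 * (0xff /6) picks entry 6, D(SrcMem | ModRM | Stack), while "inc r/m"
 * (0xff /0) picks entry 0.
 */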
static struct opcode group_table[] = {
	[Group7*8] =
	N, N, D(ModRM | SrcMem | Priv), D(ModRM | SrcMem | Priv),
	D(SrcNone | ModRM | DstMem | Mov), N,
	D(SrcMem16 | ModRM | Mov | Priv), D(SrcMem | ModRM | ByteOp | Priv),
	[Group8*8] =
	N, N, N, N,
	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
	[Group9*8] =
	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
};
static struct opcode group2_table[] = {
	[Group7*8] =
	D(SrcNone | ModRM | Priv), N, N, D(SrcNone | ModRM | Priv),
	D(SrcNone | ModRM | DstMem | Mov), N,
	D(SrcMem16 | ModRM | Mov | Priv), N,
	[Group9*8] =
	N, N, N, N, N, N, N, N,
};
static struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x08 - 0x0F */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	D(ImplicitOps | Stack | No64), N,
	/* 0x10 - 0x17 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x18 - 0x1F */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x20 - 0x27 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm), N, N,
	/* 0x28 - 0x2F */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm), N, N,
	/* 0x30 - 0x37 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm), N, N,
	/* 0x38 - 0x3F */
	D(ByteOp | DstMem | SrcReg | ModRM), D(DstMem | SrcReg | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg)),
	/* 0x50 - 0x57 */
	X8(D(SrcReg | Stack)),
	/* 0x58 - 0x5F */
	X8(D(DstReg | Stack)),
	/* 0x60 - 0x67 */
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	D(SrcImm | Mov | Stack), N, D(SrcImmByte | Mov | Stack), N,
	D(DstDI | ByteOp | Mov | String), D(DstDI | Mov | String), /* insb, insw/insd */
	D(SrcSI | ByteOp | ImplicitOps | String), D(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
	G(DstMem | SrcImm | ModRM | Group, group1),
	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
	G(DstMem | SrcImmByte | ModRM | Group, group1),
	D(ByteOp | DstMem | SrcReg | ModRM), D(DstMem | SrcReg | ModRM),
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	/* 0x88 - 0x8F */
	D(ByteOp | DstMem | SrcReg | ModRM | Mov), D(DstMem | SrcReg | ModRM | Mov),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem | ModRM | Mov),
	D(DstMem | SrcNone | ModRM | Mov), D(ModRM | DstReg),
	D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
	/* 0x90 - 0x97 */
	D(DstReg), D(DstReg), D(DstReg), D(DstReg), D(DstReg), D(DstReg), D(DstReg), D(DstReg),
	/* 0x98 - 0x9F */
	N, N, D(SrcImmFAddr | No64), N,
	D(ImplicitOps | Stack), D(ImplicitOps | Stack), N, N,
	/* 0xA0 - 0xA7 */
	D(ByteOp | DstAcc | SrcMem | Mov | MemAbs), D(DstAcc | SrcMem | Mov | MemAbs),
	D(ByteOp | DstMem | SrcAcc | Mov | MemAbs), D(DstMem | SrcAcc | Mov | MemAbs),
	D(ByteOp | SrcSI | DstDI | Mov | String), D(SrcSI | DstDI | Mov | String),
	D(ByteOp | SrcSI | DstDI | String), D(SrcSI | DstDI | String),
	/* 0xA8 - 0xAF */
	D(DstAcc | SrcImmByte | ByteOp), D(DstAcc | SrcImm), D(ByteOp | DstDI | Mov | String), D(DstDI | Mov | String),
	D(ByteOp | SrcSI | DstAcc | Mov | String), D(SrcSI | DstAcc | Mov | String),
	D(ByteOp | DstDI | String), D(DstDI | String),
	/* 0xB0 - 0xB7 */
	X8(D(ByteOp | DstReg | SrcImm | Mov)),
	/* 0xB8 - 0xBF */
	X8(D(DstReg | SrcImm | Mov)),
	/* 0xC0 - 0xC7 */
	D(ByteOp | DstMem | SrcImm | ModRM), D(DstMem | SrcImmByte | ModRM),
	N, D(ImplicitOps | Stack), N, N,
	D(ByteOp | DstMem | SrcImm | ModRM | Mov), D(DstMem | SrcImm | ModRM | Mov),
	/* 0xC8 - 0xCF */
	N, N, N, D(ImplicitOps | Stack),
	D(ImplicitOps), D(SrcImmByte), D(ImplicitOps | No64), D(ImplicitOps),
	/* 0xD0 - 0xD7 */
	D(ByteOp | DstMem | SrcImplicit | ModRM), D(DstMem | SrcImplicit | ModRM),
	D(ByteOp | DstMem | SrcImplicit | ModRM), D(DstMem | SrcImplicit | ModRM),
	N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N,
	D(ByteOp | SrcImmUByte | DstAcc), D(SrcImmUByte | DstAcc),
	D(ByteOp | SrcImmUByte | DstAcc), D(SrcImmUByte | DstAcc),
	/* 0xE8 - 0xEF */
	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
	D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
	D(SrcNone | ByteOp | DstAcc), D(SrcNone | DstAcc),
	D(SrcNone | ByteOp | DstAcc), D(SrcNone | DstAcc),
	/* 0xF0 - 0xF7 */
	N, N, N, N,
	D(ImplicitOps | Priv), D(ImplicitOps), G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), N, D(ImplicitOps), D(ImplicitOps),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	N, D(Group | GroupDual | Group7), N, N,
	N, D(ImplicitOps), D(ImplicitOps | Priv), N,
	D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
	D(ModRM | ImplicitOps | Priv), D(ModRM | Priv),
	D(ModRM | ImplicitOps | Priv), D(ModRM | Priv),
	N, N, N, N,
	N, N, N, N, N, N, N, N,
	/* 0x30 - 0x3F */
	D(ImplicitOps | Priv), N, D(ImplicitOps | Priv), N,
	D(ImplicitOps), D(ImplicitOps | Priv), N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x70 - 0x7F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xA0 - 0xA7 */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	N, D(DstMem | SrcReg | ModRM | BitOp),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
	/* 0xA8 - 0xAF */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM),
	D(ModRM), N,
	/* 0xB0 - 0xB7 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
	N, N, D(ByteOp | DstReg | SrcMem | ModRM | Mov),
	D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	D(Group | Group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	N, N, D(ByteOp | DstReg | SrcMem | ModRM | Mov),
	D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xCF */
	N, N, N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, D(Group | GroupDual | Group9),
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

#undef D
#undef N
#undef G
#undef GD
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
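
/*
 * For reference, EFLAGS_MASK evaluates to 0x8d5: OF (0x800), SF (0x80),
 * ZF (0x40), AF (0x10), PF (0x4) and CF (0x1), exactly the arithmetic
 * status flags an emulated ALU instruction may modify.
 */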
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"push %"_tmp"; "						\
	"push %"_tmp"; "						\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"pushf; "							\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "						\
	"orl  %"_LO32 _tmp",("_STK"); "					\
	"popf; "							\
	"pop  %"_sav"; "
/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp)		\
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_LO32 _tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "
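
/*
 * Taken together, the two macros above bracket the emulated instruction
 * roughly like this (with _sav holding the guest EFLAGS image):
 *
 *	pushf; splice (_sav & _msk) into the saved flags word; popf
 *	<emulated instruction executes with guest status flags loaded>
 *	pushf; pop tmp; _sav |= tmp & _msk
 *
 * so the guest's CF/PF/AF/ZF/SF/OF flow into the instruction and the
 * results flow back out, without trusting any other host flag bits.
 */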
#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix)	\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "=m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)
/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								     \
		unsigned long _tmp;					     \
									     \
		switch ((_dst).bytes) {					     \
		case 2:							     \
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w");  \
			break;						     \
		case 4:							     \
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l");  \
			break;						     \
		case 8:							     \
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
			break;						     \
		}							     \
	} while (0)
#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								     \
		unsigned long _tmp;					     \
		switch ((_dst).bytes) {					     \
		case 1:							     \
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b");  \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)
/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)		\
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")
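
/*
 * Example: emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags) with
 * 4-byte operands expands to roughly
 *
 *	asm("addl %k3,%1" : "=m"(eflags), "=m"(dst.val), "=&r"(tmp)
 *			  : "r"(src.val), "i"(EFLAGS_MASK));
 *
 * wrapped in the _PRE/_POST_EFLAGS sequences, i.e. the host CPU itself
 * performs the operation and computes the guest flags.
 */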
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type)	\
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (_cl).val;				\
		_type _srcv = (_src).val;				\
		_type _dstv = (_dst).val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(_cl).val  = (unsigned long) _clv;			\
		(_src).val = (unsigned long) _srcv;			\
		(_dst).val = (unsigned long) _dstv;			\
	} while (0)
#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
	do {								\
		switch ((_dst).bytes) {					\
		case 2:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "w", unsigned short);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "l", unsigned int);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					      "q", unsigned long));	\
			break;						\
		}							\
	} while (0)
#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)
/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)					\
	do {								\
		switch ((_dst).bytes) {					\
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type)_x;							\
})

#define insn_fetch_arr(_arr, _size, _eip)				\
({	rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
})
static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}

static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}

static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}
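
/*
 * Example: with a 16-bit address size (ad_bytes == 2), ad_mask(c) is
 * (1UL << 16) - 1 = 0xffff, so register_address_increment() wraps the low
 * word only: incrementing *reg == 0x1ffff by 1 gives 0x10000, matching the
 * 16-bit wraparound of SI/DI/SP while leaving the upper register bits alone.
 */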
static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}

static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ops->get_cached_segment_base(seg, ctxt->vcpu);
}

static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
				       struct x86_emulate_ops *ops,
				       struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return seg_base(ctxt, ops, c->seg_override);
}

static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_ES);
}

static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_SS);
}
static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			      u32 error, bool valid)
{
	ctxt->exception = vec;
	ctxt->error_code = error;
	ctxt->error_code_valid = valid;
	ctxt->restart = false;
}

static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	emulate_exception(ctxt, GP_VECTOR, err, true);
}

static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
		       int err)
{
	ctxt->cr2 = addr;
	emulate_exception(ctxt, PF_VECTOR, err, true);
}

static void emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	emulate_exception(ctxt, TS_VECTOR, err, true);
}
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long eip, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size, cur_size;

	if (eip == fc->end) {
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
				size, ctxt->vcpu, NULL);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->end += size;
	}
	*dest = fc->data[eip - fc->start];
	return X86EMUL_CONTINUE;
}
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
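
/*
 * Example: without a REX prefix, reg values 4-7 in a byte-sized operand name
 * the legacy high-byte registers, so decode_register(7, c->regs, 1) returns
 * a pointer to byte 1 of regs[3] (BH, bits 8-15 of RBX) rather than to the
 * low byte of regs[7].
 */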
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   void *ptr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
			   ctxt->vcpu, NULL);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
			   ctxt->vcpu, NULL);
	return rc;
}
static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
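
/*
 * Example: for "je" (condition code 0x4) this reduces to (4 & 15) >> 1 == 2,
 * i.e. rc = flags & EFLG_ZF, and the low bit is clear so the sense is kept;
 * "jne" (0x5) hits the same case but the low bit inverts the result.
 */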
static void decode_register_operand(struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->ptr = decode_register(reg, c->regs, highbyte_regs);
		op->val = *(u8 *)op->ptr;
		op->bytes = 1;
	} else {
		op->ptr = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
		switch (op->bytes) {
		case 2:
			op->val = *(u16 *)op->ptr;
			break;
		case 4:
			op->val = *(u32 *)op->ptr;
			break;
		case 8:
			op->val = *(u64 *) op->ptr;
			break;
		}
	}
	op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REG.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_ea = 0;
	c->use_modrm_ea = 1;

	if (c->modrm_mod == 3) {
		c->modrm_ptr = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		c->modrm_val = *(unsigned long *)c->modrm_ptr;
		return rc;
	}

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			c->modrm_ea += bx + si;
			break;
		case 1:
			c->modrm_ea += bx + di;
			break;
		case 2:
			c->modrm_ea += bp + si;
			break;
		case 3:
			c->modrm_ea += bp + di;
			break;
		case 4:
			c->modrm_ea += si;
			break;
		case 5:
			c->modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				c->modrm_ea += bp;
			break;
		case 7:
			c->modrm_ea += bx;
			break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			if (!c->has_seg_override)
				set_seg_override(c, VCPU_SREG_SS);
		c->modrm_ea = (u16)c->modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				c->modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				c->modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			c->modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
done:
	return rc;
}
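
/*
 * Example: in 16-bit addressing, the ModRM byte 0x47 splits into mod=1,
 * reg=0, rm=7, so the code above computes modrm_ea = bx + disp8 with the
 * displacement sign-extended from the next instruction byte.
 */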
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	switch (c->ad_bytes) {
	case 2:
		c->modrm_ea = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		c->modrm_ea = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		c->modrm_ea = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, group, dual, goffset;
	struct opcode opcode, *g_mod012, *g_mod3;

	/* we cannot decode insn before we complete previous rep insn */
	WARN_ON(ctxt->restart);

	c->eip = ctxt->eip;
	c->fetch.start = c->fetch.end = c->eip;
	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
			c->rep_prefix = REPNE_PREFIX;
			break;
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = REPE_PREFIX;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix & 8)
		c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[c->b];
	if (opcode.flags == 0) {
		/* Two-byte opcode? */
		if (c->b == 0x0f) {
			c->twobyte = 1;
			c->b = insn_fetch(u8, 1, c->eip);
			opcode = twobyte_table[c->b];
		}
	}
	c->d = opcode.flags;

	if (c->d & Group) {
		group = c->d & GroupMask;
		dual = c->d & GroupDual;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		if (group) {
			g_mod012 = g_mod3 = &group_table[group * 8];
			if (c->d & GroupDual)
				g_mod3 = &group2_table[group * 8];
		} else {
			if (c->d & GroupDual) {
				g_mod012 = opcode.u.gdual->mod012;
				g_mod3 = opcode.u.gdual->mod3;
			} else
				g_mod012 = g_mod3 = opcode.u.group;
		}

		c->d &= ~(Group | GroupDual | GroupMask);

		goffset = (c->modrm >> 3) & 7;

		if ((c->modrm >> 6) == 3)
			opcode = g_mod3[goffset];
		else
			opcode = g_mod012[goffset];
		c->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (c->d == 0 || (c->d & Undefined)) {
		DPRINTF("Cannot emulate %02x\n", c->b);
		return -1;
	}

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	/* ModRM and SIB bytes. */
	if (c->d & ModRM)
		rc = decode_modrm(ctxt, ops);
	else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	if (!(!c->twobyte && c->b == 0x8d))
		c->modrm_ea += seg_override_base(ctxt, ops, c);

	if (c->ad_bytes != 8)
		c->modrm_ea = (u32)c->modrm_ea;

	if (c->rip_relative)
		c->modrm_ea += c->eip;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(&c->src, c, 0);
		break;
	case SrcMem16:
		c->src.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		c->src.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		c->src.bytes = (c->d & ByteOp) ? 1 :
							   c->op_bytes;
		/* Don't fetch the address for invlpg: it could be unmapped. */
		if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
			break;
	srcmem_common:
		/*
		 * For instructions with a ModR/M byte, switch to register
		 * access if Mod = 3.
		 */
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->src.type = OP_REG;
			c->src.val = c->modrm_val;
			c->src.ptr = c->modrm_ptr;
			break;
		}
		c->src.type = OP_MEM;
		c->src.ptr = (unsigned long *)c->modrm_ea;
		c->src.val = 0;
		break;
	case SrcImm:
	case SrcImmU:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->src.bytes == 8)
			c->src.bytes = 4;
		/* NB. Immediates are sign-extended as necessary. */
		switch (c->src.bytes) {
		case 1:
			c->src.val = insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->src.val = insn_fetch(s16, 2, c->eip);
			break;
		case 4:
			c->src.val = insn_fetch(s32, 4, c->eip);
			break;
		}
		if ((c->d & SrcMask) == SrcImmU) {
			switch (c->src.bytes) {
			case 1:
				c->src.val &= 0xff;
				break;
			case 2:
				c->src.val &= 0xffff;
				break;
			case 4:
				c->src.val &= 0xffffffff;
				break;
			}
		}
		break;
	case SrcImmByte:
	case SrcImmUByte:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = 1;
		if ((c->d & SrcMask) == SrcImmByte)
			c->src.val = insn_fetch(s8, 1, c->eip);
		else
			c->src.val = insn_fetch(u8, 1, c->eip);
		break;
	case SrcAcc:
		c->src.type = OP_REG;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.ptr = &c->regs[VCPU_REGS_RAX];
		switch (c->src.bytes) {
		case 1:
			c->src.val = *(u8 *)c->src.ptr;
			break;
		case 2:
			c->src.val = *(u16 *)c->src.ptr;
			break;
		case 4:
			c->src.val = *(u32 *)c->src.ptr;
			break;
		case 8:
			c->src.val = *(u64 *)c->src.ptr;
			break;
		}
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	case SrcSI:
		c->src.type = OP_MEM;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.ptr = (unsigned long *)
			register_address(c,  seg_override_base(ctxt, ops, c),
					 c->regs[VCPU_REGS_RSI]);
		c->src.val = 0;
		break;
	case SrcImmFAddr:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = c->op_bytes + 2;
		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
		break;
	case SrcMemFAddr:
		c->src.type = OP_MEM;
		c->src.ptr = (unsigned long *)c->modrm_ea;
		c->src.bytes = c->op_bytes + 2;
		break;
	}

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		/* CL is the low byte of RCX */
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff;
		break;
	case Src2ImmByte:
		c->src2.type = OP_IMM;
		c->src2.ptr = (unsigned long *)c->eip;
		c->src2.bytes = 1;
		c->src2.val = insn_fetch(u8, 1, c->eip);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	}

	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
		return 0;
	case DstReg:
		decode_register_operand(&c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstMem:
	case DstMem64:
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
			c->dst.type = OP_REG;
			c->dst.val = c->dst.orig_val = c->modrm_val;
			c->dst.ptr = c->modrm_ptr;
			break;
		}
		c->dst.type = OP_MEM;
		c->dst.ptr = (unsigned long *)c->modrm_ea;
		if ((c->d & DstMask) == DstMem64)
			c->dst.bytes = 8;
		else
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.val = 0;
		if (c->d & BitOp) {
			unsigned long mask = ~(c->dst.bytes * 8 - 1);

			c->dst.ptr = (void *)c->dst.ptr +
						   (c->src.val & mask) / 8;
		}
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = &c->regs[VCPU_REGS_RAX];
		switch (c->dst.bytes) {
		case 1:
			c->dst.val = *(u8 *)c->dst.ptr;
			break;
		case 2:
			c->dst.val = *(u16 *)c->dst.ptr;
			break;
		case 4:
			c->dst.val = *(u32 *)c->dst.ptr;
			break;
		case 8:
			c->dst.val = *(u64 *)c->dst.ptr;
			break;
		}
		c->dst.orig_val = c->dst.val;
		break;
	case DstDI:
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = (unsigned long *)
			register_address(c, es_base(ctxt, ops),
					 c->regs[VCPU_REGS_RDI]);
		c->dst.val = 0;
		break;
	}

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
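
/*
 * Decode example: for "80 /7 ib" (cmp r/m8, imm8), opcode_table[0x80] is a
 * Group entry pointing at group1[], the reg field (7) selects group1[7] == N,
 * and ORing its empty flags back in leaves c->d as
 * ByteOp | DstMem | SrcImm | ModRM; the switches above then fetch the memory
 * destination and the sign-extended byte immediate.
 */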
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->decode.mem_read;
	u32 err;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt, addr, err);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->decode.io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		struct decode_cache *c = &ctxt->decode;
		unsigned int in_page, n;
		unsigned int count = c->rep_prefix ?
			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(c->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
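
/*
 * Example: a descriptor with g=1 and a raw limit of 0xfffff scales to
 * (0xfffff << 12) | 0xfff = 0xffffffff, i.e. page granularity turns the
 * 20-bit limit into the full 4GB segment used by the flat memory model.
 */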
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
{
	if (selector & 1 << 2) {
		struct desc_struct desc;
		memset (dt, 0, sizeof *dt);
		if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(dt, ctxt->vcpu);
}
/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	u32 err;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		emulate_gp(ctxt, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt, addr, err);

	return ret;
}
/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	u32 err;
	ulong addr;
	int ret;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		emulate_gp(ctxt, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt, addr, err);

	return ret;
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ops->cpl(ctxt->vcpu);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ops->set_segment_selector(selector, seg, ctxt->vcpu);
	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}
static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;
	u32 err;

	switch (c->dst.type) {
	case OP_REG:
		/* The 4-byte case *is* correct:
		 * in 64-bit mode we zero-extend.
		 */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *)c->dst.ptr = (u8)c->dst.val;
			break;
		case 2:
			*(u16 *)c->dst.ptr = (u16)c->dst.val;
			break;
		case 4:
			*c->dst.ptr = (u32)c->dst.val;
			break;	/* 64b: zero-ext */
		case 8:
			*c->dst.ptr = c->dst.val;
			break;
		}
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = ops->cmpxchg_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.orig_val,
					&c->dst.val,
					c->dst.bytes,
					&err,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.val,
					c->dst.bytes,
					&err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt,
				   (unsigned long)c->dst.ptr, err);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.ptr = (void *) register_address(c, ss_base(ctxt, ops),
					       c->regs[VCPU_REGS_RSP]);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
						       c->regs[VCPU_REGS_RSP]),
			   dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;

	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);

	emulate_push(ctxt, ops);
}
static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
	return rc;
}
static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

		emulate_push(ctxt, ops);

		rc = writeback(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	/* Disable writeback. */
	c->dst.type = OP_NONE;

	return rc;
}
static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
							c->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = temp_eip;

	if (c->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (c->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}
static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt, ops);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}
static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}
static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	default:
		return 0;
	}
	return 1;
}
static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt, ops);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt, ops);
		break;
	}
	return X86EMUL_CONTINUE;
}
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old = c->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
			(u32) c->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
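
/*
 * This implements cmpxchg8b (0f c7 /1): EDX:EAX is compared with the 64-bit
 * destination; on mismatch the destination value is loaded into EDX:EAX and
 * ZF is cleared, on match ECX:EBX is stored back and ZF is set, which is
 * exactly what the two branches above do.
 */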
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	return rc;
}
static inline void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops, struct desc_struct *cs,
			struct desc_struct *ss)
{
	memset(cs, 0, sizeof(struct desc_struct));
	ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}
static int
emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		emulate_ud(ctxt);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt->vcpu,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}
static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64) {
		emulate_ud(ctxt);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
	    || is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}
static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->eip = c->regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct desc_struct tr_seg;
	int r;
	u16 io_bitmap_ptr;
	u8 perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;

	ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
			  ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
			  &perm, 1, ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
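
/*
 * Example: for "in al, dx" with DX = 0x3f8 the check reads the u16 I/O
 * bitmap base at TSS offset 102, then the bitmap byte at base + 0x3f8/8 =
 * base + 0x7f; bit_idx = 0x3f8 & 7 = 0 and mask = 1, so bit 0 of that byte
 * must be clear for the access to be permitted.
 */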
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;

	return true;
}
*ctxt
,
2245 struct x86_emulate_ops
*ops
,
2246 struct tss_segment_16
*tss
)
2248 struct decode_cache
*c
= &ctxt
->decode
;
2251 tss
->flag
= ctxt
->eflags
;
2252 tss
->ax
= c
->regs
[VCPU_REGS_RAX
];
2253 tss
->cx
= c
->regs
[VCPU_REGS_RCX
];
2254 tss
->dx
= c
->regs
[VCPU_REGS_RDX
];
2255 tss
->bx
= c
->regs
[VCPU_REGS_RBX
];
2256 tss
->sp
= c
->regs
[VCPU_REGS_RSP
];
2257 tss
->bp
= c
->regs
[VCPU_REGS_RBP
];
2258 tss
->si
= c
->regs
[VCPU_REGS_RSI
];
2259 tss
->di
= c
->regs
[VCPU_REGS_RDI
];
2261 tss
->es
= ops
->get_segment_selector(VCPU_SREG_ES
, ctxt
->vcpu
);
2262 tss
->cs
= ops
->get_segment_selector(VCPU_SREG_CS
, ctxt
->vcpu
);
2263 tss
->ss
= ops
->get_segment_selector(VCPU_SREG_SS
, ctxt
->vcpu
);
2264 tss
->ds
= ops
->get_segment_selector(VCPU_SREG_DS
, ctxt
->vcpu
);
2265 tss
->ldt
= ops
->get_segment_selector(VCPU_SREG_LDTR
, ctxt
->vcpu
);
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	c->eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	c->regs[VCPU_REGS_RAX] = tss->ax;
	c->regs[VCPU_REGS_RCX] = tss->cx;
	c->regs[VCPU_REGS_RDX] = tss->dx;
	c->regs[VCPU_REGS_RBX] = tss->bx;
	c->regs[VCPU_REGS_RSP] = tss->sp;
	c->regs[VCPU_REGS_RBP] = tss->bp;
	c->regs[VCPU_REGS_RSI] = tss->si;
	c->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
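/*
 * Switch to a new 16-bit task: save the outgoing state into the old TSS,
 * optionally link the new TSS back to the old one, then load the incoming
 * state. All TSS accesses go through ops->read_std/write_std so that
 * faults can be reported back to the guest.
 */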
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}
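/*
 * 32-bit TSS variants: structurally identical to the 16-bit pair above,
 * but they additionally carry CR3, EIP/EFLAGS and the FS/GS selectors.
 */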
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}
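/*
 * Common task-switch driver, shared by the CALL/JMP/IRET/gate paths.
 * It validates the target TSS descriptor, manages the busy bit and
 * EFLAGS.NT, dispatches to the 16- or 32-bit copier based on the
 * descriptor type, and finally pushes the error code (if any) onto the
 * new task's stack.
 */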
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
	ulong old_tss_base =
		ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, ops, old_tss_sel,
					 &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags;
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, ops, tss_selector,
					 &next_tss_desc);
	}

	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);

	if (has_error_code) {
		struct decode_cache *c = &ctxt->decode;

		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		c->lock_prefix = 0;
		c->src.val = (unsigned long) error_code;
		emulate_push(ctxt, ops);
	}

	return ret;
}
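/*
 * External entry point used when a task switch must be emulated on the
 * guest's behalf; returns 0 on success and -1 if the switch could not
 * be handled.
 */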
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->eip = ctxt->eip;
	c->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		rc = writeback(ctxt, ops);
		if (rc == X86EMUL_CONTINUE)
			ctxt->eip = c->eip;
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
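/*
 * Advance a string operand after one iteration: the index register moves
 * by the operand size, forwards when EFLAGS.DF is clear and backwards
 * when it is set (e.g. a 4-byte movsd with DF=0 adds 4 to RSI/RDI).
 */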
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]);
}
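/*
 * Main dispatcher. By the time we get here the instruction has been
 * decoded into ctxt->decode; this function performs the privilege and
 * REP-prefix checks, reads the memory operands, executes one iteration
 * of the instruction, and writes the result back.
 */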
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;

	ctxt->decode.mem_read.pos = 0;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		emulate_ud(ctxt);
		goto done;
	}

	/* Privileged instructions can be executed only at CPL 0 */
	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		goto done;
	}

	if (c->rep_prefix && (c->d & String)) {
		ctxt->restart = true;
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
		string_done:
			ctxt->restart = false;
			ctxt->eip = c->eip;
			goto done;
		}
		/*
		 * The second termination condition only applies for REPE
		 * and REPNE. Test if the repeat string operation prefix is
		 * REPE/REPZ or REPNE/REPNZ and, if so, test the
		 * corresponding termination condition:
		 *	- if REPE/REPZ and ZF = 0 then done
		 *	- if REPNE/REPNZ and ZF = 1 then done
		 */
		if ((c->b == 0xa6) || (c->b == 0xa7) ||
		    (c->b == 0xae) || (c->b == 0xaf)) {
			if ((c->rep_prefix == REPE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == 0))
				goto string_done;
			if ((c->rep_prefix == REPNE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
				goto string_done;
		}
		c->eip = ctxt->eip;
	}

	if (c->src.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, (unsigned long)c->src.ptr,
				   c->src.valptr, c->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val64 = c->src.val64;
	}

	if (c->src2.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, (unsigned long)c->src2.ptr,
				   &c->src2.val, c->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = read_emulated(ctxt, ops, (unsigned long)c->dst.ptr,
				   &c->dst.val, c->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;
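/*
 * Single-byte opcode dispatch. The arithmetic families share their
 * bodies via the add/or/adc/... labels so that the Grp1 immediate forms
 * (0x80 to 0x83) can jump to the same code.
 */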
2724 emulate_2op_SrcV("add", c
->src
, c
->dst
, ctxt
->eflags
);
2726 case 0x06: /* push es */
2727 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_ES
);
2729 case 0x07: /* pop es */
2730 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_ES
);
2731 if (rc
!= X86EMUL_CONTINUE
)
2736 emulate_2op_SrcV("or", c
->src
, c
->dst
, ctxt
->eflags
);
2738 case 0x0e: /* push cs */
2739 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_CS
);
2743 emulate_2op_SrcV("adc", c
->src
, c
->dst
, ctxt
->eflags
);
2745 case 0x16: /* push ss */
2746 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_SS
);
2748 case 0x17: /* pop ss */
2749 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_SS
);
2750 if (rc
!= X86EMUL_CONTINUE
)
2755 emulate_2op_SrcV("sbb", c
->src
, c
->dst
, ctxt
->eflags
);
2757 case 0x1e: /* push ds */
2758 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_DS
);
2760 case 0x1f: /* pop ds */
2761 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_DS
);
2762 if (rc
!= X86EMUL_CONTINUE
)
2767 emulate_2op_SrcV("and", c
->src
, c
->dst
, ctxt
->eflags
);
2771 emulate_2op_SrcV("sub", c
->src
, c
->dst
, ctxt
->eflags
);
2775 emulate_2op_SrcV("xor", c
->src
, c
->dst
, ctxt
->eflags
);
2779 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
2781 case 0x40 ... 0x47: /* inc r16/r32 */
2782 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
2784 case 0x48 ... 0x4f: /* dec r16/r32 */
2785 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
2787 case 0x50 ... 0x57: /* push reg */
2788 emulate_push(ctxt
, ops
);
2790 case 0x58 ... 0x5f: /* pop reg */
2792 rc
= emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->op_bytes
);
2793 if (rc
!= X86EMUL_CONTINUE
)
2796 case 0x60: /* pusha */
2797 rc
= emulate_pusha(ctxt
, ops
);
2798 if (rc
!= X86EMUL_CONTINUE
)
2801 case 0x61: /* popa */
2802 rc
= emulate_popa(ctxt
, ops
);
2803 if (rc
!= X86EMUL_CONTINUE
)
2806 case 0x63: /* movsxd */
2807 if (ctxt
->mode
!= X86EMUL_MODE_PROT64
)
2808 goto cannot_emulate
;
2809 c
->dst
.val
= (s32
) c
->src
.val
;
2811 case 0x68: /* push imm */
2812 case 0x6a: /* push imm8 */
2813 emulate_push(ctxt
, ops
);
2815 case 0x6c: /* insb */
2816 case 0x6d: /* insw/insd */
2817 c
->dst
.bytes
= min(c
->dst
.bytes
, 4u);
2818 if (!emulator_io_permited(ctxt
, ops
, c
->regs
[VCPU_REGS_RDX
],
2820 emulate_gp(ctxt
, 0);
2823 if (!pio_in_emulated(ctxt
, ops
, c
->dst
.bytes
,
2824 c
->regs
[VCPU_REGS_RDX
], &c
->dst
.val
))
2825 goto done
; /* IO is needed, skip writeback */
2827 case 0x6e: /* outsb */
2828 case 0x6f: /* outsw/outsd */
2829 c
->src
.bytes
= min(c
->src
.bytes
, 4u);
2830 if (!emulator_io_permited(ctxt
, ops
, c
->regs
[VCPU_REGS_RDX
],
2832 emulate_gp(ctxt
, 0);
2835 ops
->pio_out_emulated(c
->src
.bytes
, c
->regs
[VCPU_REGS_RDX
],
2836 &c
->src
.val
, 1, ctxt
->vcpu
);
2838 c
->dst
.type
= OP_NONE
; /* nothing to writeback */
2840 case 0x70 ... 0x7f: /* jcc (short) */
2841 if (test_cc(c
->b
, ctxt
->eflags
))
2842 jmp_rel(c
, c
->src
.val
);
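	/*
	 * For Grp1 the real operation lives in ModRM.reg; dispatch to the
	 * shared arithmetic bodies above.
	 */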
	case 0x80 ... 0x83:	/* Grp1 */
		switch (c->modrm_reg) {
		case 0:
			goto add;
		case 1:
			goto or;
		case 2:
			goto adc;
		case 3:
			goto sbb;
		case 4:
			goto and;
		case 5:
			goto sub;
		case 6:
			goto xor;
		case 7:
			goto cmp;
		}
		break;
	case 0x84 ... 0x85:
	test:
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 0x86 ... 0x87:	/* xchg */
	xchg:
		/* Write back the register source. */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *) c->src.ptr = (u8) c->dst.val;
			break;
		case 2:
			*(u16 *) c->src.ptr = (u16) c->dst.val;
			break;
		case 4:
			*c->src.ptr = (u32) c->dst.val;
			break;	/* 64b reg: zero-extend */
		case 8:
			*c->src.ptr = c->dst.val;
			break;
		}
		/*
		 * Write back the memory destination with implicit LOCK
		 * prefix.
		 */
		c->dst.val = c->src.val;
		c->lock_prefix = 1;
		break;
	case 0x88 ... 0x8b:	/* mov */
		goto mov;
	case 0x8c:  /* mov r/m, sreg */
		if (c->modrm_reg > VCPU_SREG_GS) {
			emulate_ud(ctxt);
			goto done;
		}
		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x8d: /* lea r16/r32, m */
		c->dst.val = c->modrm_ea;
		break;
	case 0x8e: { /* mov seg, r/m16 */
		uint16_t sel;

		sel = c->src.val;

		if (c->modrm_reg == VCPU_SREG_CS ||
		    c->modrm_reg > VCPU_SREG_GS) {
			emulate_ud(ctxt);
			goto done;
		}

		if (c->modrm_reg == VCPU_SREG_SS)
			ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);

		c->dst.type = OP_NONE;  /* Disable writeback. */
		break;
	}
	case 0x8f:		/* pop (sole member of Grp1a) */
		rc = emulate_grp1a(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x90: /* nop / xchg r8,rax */
		if (c->dst.ptr == (unsigned long *)&c->regs[VCPU_REGS_RAX]) {
			c->dst.type = OP_NONE;  /* nop */
			break;
		}
	case 0x91 ... 0x97: /* xchg reg,rax */
		c->src.type = OP_REG;
		c->src.bytes = c->op_bytes;
		c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX];
		c->src.val = *(c->src.ptr);
		goto xchg;
	case 0x9c: /* pushf */
		c->src.val = (unsigned long) ctxt->eflags;
		emulate_push(ctxt, ops);
		break;
	case 0x9d: /* popf */
		c->dst.type = OP_REG;
		c->dst.ptr = (unsigned long *) &ctxt->eflags;
		c->dst.bytes = c->op_bytes;
		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa0 ... 0xa3:	/* mov */
	case 0xa4 ... 0xa5:	/* movs */
		goto mov;
	case 0xa6 ... 0xa7:	/* cmps */
		c->dst.type = OP_NONE; /* Disable writeback. */
		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
		goto cmp;
	case 0xa8 ... 0xa9:	/* test ax, imm */
		goto test;
	case 0xaa ... 0xab:	/* stos */
		c->dst.val = c->regs[VCPU_REGS_RAX];
		break;
	case 0xac ... 0xad:	/* lods */
		goto mov;
	case 0xae ... 0xaf:	/* scas */
		DPRINTF("Urk! I don't handle SCAS.\n");
		goto cannot_emulate;
	case 0xb0 ... 0xbf: /* mov r, imm */
		goto mov;
	case 0xc0 ... 0xc1:
		emulate_grp2(ctxt);
		break;
	case 0xc3: /* ret */
		c->dst.type = OP_REG;
		c->dst.ptr = &c->eip;
		c->dst.bytes = c->op_bytes;
		goto pop_instruction;
	case 0xc6 ... 0xc7:	/* mov (sole member of Grp11) */
	mov:
		c->dst.val = c->src.val;
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xcf:		/* iret */
		rc = emulate_iret(ctxt, ops);

		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		c->src.val = 1;
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe4:	/* inb */
	case 0xe5:	/* in */
		goto do_io_in;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt, ops);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: { /* jmp far */
		unsigned short sel;
	jump_far:
		memcpy(&sel, c->src.valptr + c->op_bytes, 2);

		if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
			goto done;

		c->eip = 0;
		memcpy(&c->eip, c->src.valptr, c->op_bytes);
		break;
	}
	case 0xeb:
	jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out dx,al */
	case 0xef: /* out dx,(e/r)ax */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1,
				      ctxt->vcpu);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		if (!emulate_grp3(ctxt, ops))
			goto cannot_emulate;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else {
			ctxt->eflags &= ~X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else {
			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
			ctxt->eflags |= X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	default:
		goto cannot_emulate;
	}
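/*
 * Commit the result: writeback() stores c->dst to its register or memory
 * destination, and the string-op bookkeeping below advances RSI/RDI/RCX
 * and decides whether to restart the iteration.
 */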
writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	c->dst.type = saved_dst_type;

	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
				VCPU_REGS_RSI, &c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
				&c->dst);

	if (c->rep_prefix && (c->d & String)) {
		struct read_cache *rc = &ctxt->decode.io_read;
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		/*
		 * Re-enter the guest when the pio read ahead buffer is empty
		 * or, if it is not used, after every 1024 iterations.
		 */
		if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
		    (rc->end != 0 && rc->end == rc->pos))
			ctxt->restart = false;
	}
	/*
	 * reset the read cache here in case the string instruction is
	 * restarted without decoding
	 */
	ctxt->decode.mem_read.end = 0;
	ctxt->eip = c->eip;

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
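/*
 * Two-byte (0x0f-prefixed) opcode dispatch.
 */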
twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
			u16 size;
			unsigned long address;

		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.ptr,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					if (rc != X86EMUL_CONTINUE)
						goto done;
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.ptr,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			emulate_ud(ctxt);
			goto done;
		case 7: /* invlpg */
			emulate_invlpg(ctxt->vcpu, c->modrm_ea);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05:	/* syscall */
		rc = emulate_syscall(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x06:
		emulate_clts(ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x09:		/* wbinvd */
		kvm_emulate_wbinvd(ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		c->dst.type = OP_NONE;
		break;
	case 0x20: /* mov cr, reg */
		switch (c->modrm_reg) {
		case 1:
		case 5 ... 7:
		case 9 ... 15:
			emulate_ud(ctxt);
			goto done;
		}
		c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x21: /* mov from dr to reg */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			goto done;
		}
		ops->get_dr(c->modrm_reg, &c->regs[c->modrm_rm], ctxt->vcpu);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x22: /* mov reg, cr */
		if (ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			goto done;
		}

		if (ops->set_dr(c->modrm_reg, c->regs[c->modrm_rm] &
				((ctxt->mode == X86EMUL_MODE_PROT64) ?
				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
			/* #UD condition is already handled by the code above */
			emulate_gp(ctxt, 0);
			goto done;
		}

		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x32:
		/* rdmsr */
		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE;
		break;
	case 0xa0:	  /* push fs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa1:	 /* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa3:
	bt:		/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:	/* push gs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xab:
	bts:		/* bts */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae:              /* clflush */
		break;
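	/*
	 * cmpxchg compares the accumulator with the destination; ZF then
	 * selects whether the destination or the accumulator is written
	 * back, mirroring hardware behaviour.
	 */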
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
	case 0xb3:
	btr:		/* btr */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
					     : (u16) c->src.val;
		break;
	case 0xba:		/* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	btc:		/* btc */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
					       (s16) c->src.val;
		break;
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
						  (u64) c->src.val;
		break;
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	default:
		goto cannot_emulate;
	}
	goto writeback;

cannot_emulate:
	DPRINTF("Cannot emulate %02x\n", c->b);