/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#ifndef __KERNEL__
#include <stdio.h>
#include <stdint.h>
#include <public/xen.h>
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "x86.h"
#include "tss.h"
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<16)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<17)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<17)	/* Register operand. */
#define DstMem      (3<<17)	/* Memory operand. */
#define DstAcc      (4<<17)	/* Destination Accumulator */
#define DstDI       (5<<17)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<17)	/* 64bit memory operand */
#define DstMask     (7<<17)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcImplicit (0<<4)	/* Source operand is implicit in the opcode. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
#define SrcAcc      (0xd<<4)	/* Source Accumulator */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
#define GroupMask   0x0f	/* Group number stored in bits 0:3 */
/* Misc flags */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Mask    (7<<29)
#define X2(x) (x), (x)
#define X3(x) X2(x), (x)
#define X4(x) X2(x), X2(x)
#define X5(x) X4(x), (x)
#define X6(x) X4(x), X2(x)
#define X7(x) X4(x), X3(x)
#define X8(x) X4(x), X4(x)
#define X16(x) X8(x), X8(x)
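/*
 * Example: X8(DstReg) expands, via X4 and X2, to eight comma-separated
 * copies of DstReg, so a run of identical opcode-table entries below can
 * be written as a single X8(...)/X16(...) term.
 */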
enum {
	Group1, Group1A, Group3, Group4, Group5, Group7, Group8, Group9,
};
static u32 opcode_table[256] = {
	/* 0x00 - 0x07 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x08 - 0x0F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, 0,
	/* 0x10 - 0x17 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x18 - 0x1F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x20 - 0x27 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x28 - 0x2F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x30 - 0x37 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x38 - 0x3F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	0, 0,
	/* 0x40 - 0x4F */
	X16(DstReg),
	/* 0x50 - 0x57 */
	X8(SrcReg | Stack),
	/* 0x58 - 0x5F */
	X8(DstReg | Stack),
	/* 0x60 - 0x67 */
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
	0, 0, 0, 0,
	/* 0x68 - 0x6F */
	SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
	DstDI | ByteOp | Mov | String, DstDI | Mov | String, /* insb, insw/insd */
	SrcSI | ByteOp | ImplicitOps | String, SrcSI | ImplicitOps | String, /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(SrcImmByte),
	/* 0x80 - 0x87 */
	ByteOp | DstMem | SrcImm | ModRM | Group | Group1,
	DstMem | SrcImm | ModRM | Group | Group1,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Group | Group1,
	DstMem | SrcImmByte | ModRM | Group | Group1,
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	/* 0x88 - 0x8F */
	ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
	ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstMem | SrcNone | ModRM | Mov, ModRM | DstReg,
	ImplicitOps | SrcMem16 | ModRM, Group | Group1A,
	/* 0x90 - 0x97 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x98 - 0x9F */
	0, 0, SrcImmFAddr | No64, 0,
	ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
	/* 0xA0 - 0xA7 */
	ByteOp | DstAcc | SrcMem | Mov | MemAbs, DstAcc | SrcMem | Mov | MemAbs,
	ByteOp | DstMem | SrcAcc | Mov | MemAbs, DstMem | SrcAcc | Mov | MemAbs,
	ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String,
	ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String,
	/* 0xA8 - 0xAF */
	DstAcc | SrcImmByte | ByteOp, DstAcc | SrcImm, ByteOp | DstDI | Mov | String, DstDI | Mov | String,
	ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String,
	ByteOp | DstDI | String, DstDI | String,
	/* 0xB0 - 0xB7 */
	X8(ByteOp | DstReg | SrcImm | Mov),
	/* 0xB8 - 0xBF */
	X8(DstReg | SrcImm | Mov),
	/* 0xC0 - 0xC7 */
	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
	0, ImplicitOps | Stack, 0, 0,
	ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
	/* 0xC8 - 0xCF */
	0, 0, 0, ImplicitOps | Stack,
	ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps,
	/* 0xD0 - 0xD7 */
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	0, 0, 0, 0,
	/* 0xD8 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xE7 */
	0, 0, 0, 0,
	ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
	ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
	/* 0xE8 - 0xEF */
	SrcImm | Stack, SrcImm | ImplicitOps,
	SrcImmFAddr | No64, SrcImmByte | ImplicitOps,
	SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
	SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
	/* 0xF0 - 0xF7 */
	0, 0, 0, 0,
	ImplicitOps | Priv, ImplicitOps, ByteOp | Group | Group3, Group | Group3,
	/* 0xF8 - 0xFF */
	ImplicitOps, 0, ImplicitOps, ImplicitOps,
	ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
};
static u32 twobyte_table[256] = {
	/* 0x00 - 0x0F */
	0, Group | GroupDual | Group7, 0, 0,
	0, ImplicitOps, ImplicitOps | Priv, 0,
	ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
	0, ImplicitOps | ModRM, 0, 0,
	/* 0x10 - 0x1F */
	0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 - 0x2F */
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x30 - 0x3F */
	ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
	ImplicitOps, ImplicitOps | Priv, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 - 0x4F */
	X16(DstReg | SrcMem | ModRM | Mov),
	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 - 0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 - 0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 - 0x8F */
	X16(SrcImm),
	/* 0x90 - 0x9F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 - 0xA7 */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM, 0, 0,
	/* 0xA8 - 0xAF */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM,
	ModRM, 0,
	/* 0xB0 - 0xB7 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xB8 - 0xBF */
	0, 0,
	Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xC0 - 0xCF */
	0, 0, 0, DstMem | SrcReg | ModRM | Mov,
	0, 0, 0, Group | GroupDual | Group9,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 - 0xFF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static u32 group_table[] = {
	[Group1*8] =
	X7(Lock), 0,
	[Group1A*8] =
	DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
	[Group3*8] =
	DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
	DstMem | SrcNone | ModRM | Lock, DstMem | SrcNone | ModRM | Lock,
	0, 0, 0, 0,
	[Group4*8] =
	ByteOp | DstMem | SrcNone | ModRM | Lock, ByteOp | DstMem | SrcNone | ModRM | Lock,
	0, 0, 0, 0, 0, 0,
	[Group5*8] =
	DstMem | SrcNone | ModRM | Lock, DstMem | SrcNone | ModRM | Lock,
	SrcMem | ModRM | Stack, 0,
	SrcMem | ModRM | Stack, SrcMemFAddr | ModRM | ImplicitOps,
	SrcMem | ModRM | Stack, 0,
	[Group7*8] =
	0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
	[Group8*8] =
	0, 0, 0, 0,
	DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
	[Group9*8] =
	0, DstMem64 | ModRM | Lock, 0, 0, 0, 0, 0, 0,
};
static u32 group2_table[] = {
	[Group7*8] =
	SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, 0,
	[Group9*8] =
	0, 0, 0, 0, 0, 0, 0, 0,
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */
#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"push %"_tmp"; "						\
	"push %"_tmp"; "						\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"pushf; "							\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "						\
	"orl  %"_LO32 _tmp",("_STK"); "					\
	"popf; "							\
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_LO32 _tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "
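/*
 * Sketch of how these pair up (operand numbers taken from ____emulate_2op
 * below): for a 32-bit add, the generated asm is roughly
 *
 *	_PRE_EFLAGS("0", "4", "2")	load guest EFLAGS into the CPU
 *	"addl %k3,%1; "			execute the instruction natively
 *	_POST_EFLAGS("0", "4", "2")	capture the resulting EFLAGS
 *
 * so the host computes both the result and the flag effects in one step.
 */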
#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix)	\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "=m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)
/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
			break;						\
		}							\
	} while (0)
#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								     \
		unsigned long _tmp;					     \
		switch ((_dst).bytes) {					     \
		case 1:							     \
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b");  \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)
/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)		\
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")
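/*
 * Usage sketch: callers are expected to invoke these as, e.g.,
 *	emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
 * which dispatches on c->dst.bytes and runs a native "add" of the
 * matching width while updating the guest's EFLAGS copy.
 */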
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type)	\
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (_cl).val;				\
		_type _srcv = (_src).val;				\
		_type _dstv = (_dst).val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(_cl).val  = (unsigned long) _clv;			\
		(_src).val = (unsigned long) _srcv;			\
		(_dst).val = (unsigned long) _dstv;			\
	} while (0)

#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
	do {								\
		switch ((_dst).bytes) {					\
		case 2:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "w", unsigned short);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "l", unsigned int);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					      "q", unsigned long));	\
			break;						\
		}							\
	} while (0)
#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)					\
	do {								\
		switch ((_dst).bytes) {					\
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type)_x;							\
})

#define insn_fetch_arr(_arr, _size, _eip)				\
({	rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
})
static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}
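/*
 * Worked example: with c->ad_bytes == 2 (16-bit addressing),
 * ad_mask(c) == 0xffff, so address_mask(c, 0x12345678) == 0x5678,
 * the same wrap-around a real 16-bit effective address would see.
 */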
static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}
static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}
static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}
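/*
 * Example: with c->ad_bytes == 2, jmp_rel(c, -2) from c->eip == 0 leaves
 * the low 16 bits wrapped to 0xfffe and the upper bits of c->eip intact.
 */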
static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ops->get_cached_segment_base(seg, ctxt->vcpu);
}
static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
				       struct x86_emulate_ops *ops,
				       struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return seg_base(ctxt, ops, c->seg_override);
}
static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_ES);
}

static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_SS);
}
static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			      u32 error, bool valid)
{
	ctxt->exception = vec;
	ctxt->error_code = error;
	ctxt->error_code_valid = valid;
	ctxt->restart = false;
}
static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	emulate_exception(ctxt, GP_VECTOR, err, true);
}

static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
		       int err)
{
	ctxt->cr2 = addr;
	emulate_exception(ctxt, PF_VECTOR, err, true);
}

static void emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	emulate_exception(ctxt, TS_VECTOR, err, true);
}
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long eip, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size, cur_size;

	if (eip == fc->end) {
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
				size, ctxt->vcpu, NULL);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->end += size;
	}
	*dest = fc->data[eip - fc->start];
	return X86EMUL_CONTINUE;
}
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
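/*
 * Example: without a REX prefix, modrm_reg values 4-7 select the legacy
 * high-byte registers, so decode_register(4, c->regs, 1) yields a pointer
 * one byte into c->regs[VCPU_REGS_RAX], i.e. AH.
 */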
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   void *ptr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
			   ctxt->vcpu, NULL);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
			   ctxt->vcpu, NULL);
	return rc;
}
static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
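/*
 * Example: "jne" encodes condition 0x5; (0x5 & 15) >> 1 == 2 selects the
 * z/e test (ZF), and the set low bit inverts the sense, so the condition
 * holds exactly when ZF is clear.
 */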
static void decode_register_operand(struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->ptr = decode_register(reg, c->regs, highbyte_regs);
		op->val = *(u8 *)op->ptr;
		op->bytes = 1;
	} else {
		op->ptr = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
		switch (op->bytes) {
		case 2:
			op->val = *(u16 *)op->ptr;
			break;
		case 4:
			op->val = *(u32 *)op->ptr;
			break;
		case 8:
			op->val = *(u64 *) op->ptr;
			break;
		}
	}
	op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REG.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_ea = 0;
	c->use_modrm_ea = 1;

	if (c->modrm_mod == 3) {
		c->modrm_ptr = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		c->modrm_val = *(unsigned long *)c->modrm_ptr;
		return rc;
	}

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			c->modrm_ea += bx + si;
			break;
		case 1:
			c->modrm_ea += bx + di;
			break;
		case 2:
			c->modrm_ea += bp + si;
			break;
		case 3:
			c->modrm_ea += bp + di;
			break;
		case 4:
			c->modrm_ea += si;
			break;
		case 5:
			c->modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				c->modrm_ea += bp;
			break;
		case 7:
			c->modrm_ea += bx;
			break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			if (!c->has_seg_override)
				set_seg_override(c, VCPU_SREG_SS);
		c->modrm_ea = (u16)c->modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				c->modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				c->modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			c->modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
done:
	return rc;
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	switch (c->ad_bytes) {
	case 2:
		c->modrm_ea = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		c->modrm_ea = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		c->modrm_ea = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, group, dual;


	/* we cannot decode insn before we complete previous rep insn */
	WARN_ON(ctxt->restart);

	c->eip = ctxt->eip;
	c->fetch.start = c->fetch.end = c->eip;
	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
			c->rep_prefix = REPNE_PREFIX;
			break;
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = REPE_PREFIX;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix & 8)
		c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	c->d = opcode_table[c->b];
	if (c->d == 0) {
		/* Two-byte opcode? */
		if (c->b == 0x0f) {
			c->twobyte = 1;
			c->b = insn_fetch(u8, 1, c->eip);
			c->d = twobyte_table[c->b];
		}
	}

	if (c->d & Group) {
		group = c->d & GroupMask;
		dual = c->d & GroupDual;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		group = (group << 3) + ((c->modrm >> 3) & 7);
		c->d &= ~(Group | GroupDual | GroupMask);
		if (dual && (c->modrm >> 6) == 3)
			c->d |= group2_table[group];
		else
			c->d |= group_table[group];
	}

	/* Unrecognised? */
	if (c->d == 0 || (c->d & Undefined)) {
		DPRINTF("Cannot emulate %02x\n", c->b);
		return -1;
	}

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	/* ModRM and SIB bytes. */
	if (c->d & ModRM)
		rc = decode_modrm(ctxt, ops);
	else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	if (!(!c->twobyte && c->b == 0x8d))
		c->modrm_ea += seg_override_base(ctxt, ops, c);

	if (c->ad_bytes != 8)
		c->modrm_ea = (u32)c->modrm_ea;

	if (c->rip_relative)
		c->modrm_ea += c->eip;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(&c->src, c, 0);
		break;
	case SrcMem16:
		c->src.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		c->src.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		/* Don't fetch the address for invlpg: it could be unmapped. */
		if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
			break;
	srcmem_common:
		/*
		 * For instructions with a ModR/M byte, switch to register
		 * access if Mod = 3.
		 */
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->src.type = OP_REG;
			c->src.val = c->modrm_val;
			c->src.ptr = c->modrm_ptr;
			break;
		}
		c->src.type = OP_MEM;
		c->src.ptr = (unsigned long *)c->modrm_ea;
		c->src.val = 0;
		break;
	case SrcImm:
	case SrcImmU:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->src.bytes == 8)
			c->src.bytes = 4;
		/* NB. Immediates are sign-extended as necessary. */
		switch (c->src.bytes) {
		case 1:
			c->src.val = insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->src.val = insn_fetch(s16, 2, c->eip);
			break;
		case 4:
			c->src.val = insn_fetch(s32, 4, c->eip);
			break;
		}
		if ((c->d & SrcMask) == SrcImmU) {
			switch (c->src.bytes) {
			case 1:
				c->src.val &= 0xff;
				break;
			case 2:
				c->src.val &= 0xffff;
				break;
			case 4:
				c->src.val &= 0xffffffff;
				break;
			}
		}
		break;
	case SrcImmByte:
	case SrcImmUByte:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = 1;
		if ((c->d & SrcMask) == SrcImmByte)
			c->src.val = insn_fetch(s8, 1, c->eip);
		else
			c->src.val = insn_fetch(u8, 1, c->eip);
		break;
	case SrcAcc:
		c->src.type = OP_REG;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.ptr = &c->regs[VCPU_REGS_RAX];
		switch (c->src.bytes) {
		case 1:
			c->src.val = *(u8 *)c->src.ptr;
			break;
		case 2:
			c->src.val = *(u16 *)c->src.ptr;
			break;
		case 4:
			c->src.val = *(u32 *)c->src.ptr;
			break;
		case 8:
			c->src.val = *(u64 *)c->src.ptr;
			break;
		}
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	case SrcSI:
		c->src.type = OP_MEM;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.ptr = (unsigned long *)
			register_address(c, seg_override_base(ctxt, ops, c),
					 c->regs[VCPU_REGS_RSI]);
		c->src.val = 0;
		break;
	case SrcImmFAddr:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = c->op_bytes + 2;
		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
		break;
	case SrcMemFAddr:
		c->src.type = OP_MEM;
		c->src.ptr = (unsigned long *)c->modrm_ea;
		c->src.bytes = c->op_bytes + 2;
		break;
	}

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
		break;
	case Src2ImmByte:
		c->src2.type = OP_IMM;
		c->src2.ptr = (unsigned long *)c->eip;
		c->src2.bytes = 1;
		c->src2.val = insn_fetch(u8, 1, c->eip);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	}

	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
		return 0;
	case DstReg:
		decode_register_operand(&c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstMem:
	case DstMem64:
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
			c->dst.type = OP_REG;
			c->dst.val = c->dst.orig_val = c->modrm_val;
			c->dst.ptr = c->modrm_ptr;
			break;
		}
		c->dst.type = OP_MEM;
		c->dst.ptr = (unsigned long *)c->modrm_ea;
		if ((c->d & DstMask) == DstMem64)
			c->dst.bytes = 8;
		else
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.val = 0;
		if (c->d & BitOp) {
			unsigned long mask = ~(c->dst.bytes * 8 - 1);

			c->dst.ptr = (void *)c->dst.ptr +
						   (c->src.val & mask) / 8;
		}
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = &c->regs[VCPU_REGS_RAX];
		switch (c->dst.bytes) {
		case 1:
			c->dst.val = *(u8 *)c->dst.ptr;
			break;
		case 2:
			c->dst.val = *(u16 *)c->dst.ptr;
			break;
		case 4:
			c->dst.val = *(u32 *)c->dst.ptr;
			break;
		case 8:
			c->dst.val = *(u64 *)c->dst.ptr;
			break;
		}
		c->dst.orig_val = c->dst.val;
		break;
	case DstDI:
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = (unsigned long *)
			register_address(c, es_base(ctxt, ops),
					 c->regs[VCPU_REGS_RDI]);
		c->dst.val = 0;
		break;
	}

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->decode.mem_read;
	u32 err;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt, addr, err);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->decode.io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		struct decode_cache *c = &ctxt->decode;
		unsigned int in_page, n;
		unsigned int count = c->rep_prefix ?
			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(c->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
{
	if (selector & 1 << 2) {
		struct desc_struct desc;
		memset (dt, 0, sizeof *dt);
		if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(dt, ctxt->vcpu);
}
/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	u32 err;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		emulate_gp(ctxt, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt, addr, err);

	return ret;
}
/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	u32 err;
	ulong addr;
	int ret;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		emulate_gp(ctxt, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt, addr, err);

	return ret;
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ops->cpl(ctxt->vcpu);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ops->set_segment_selector(selector, seg, ctxt->vcpu);
	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}
static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;
	u32 err;

	switch (c->dst.type) {
	case OP_REG:
		/* The 4-byte case *is* correct:
		 * in 64-bit mode we zero-extend.
		 */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *)c->dst.ptr = (u8)c->dst.val;
			break;
		case 2:
			*(u16 *)c->dst.ptr = (u16)c->dst.val;
			break;
		case 4:
			*c->dst.ptr = (u32)c->dst.val;
			break;	/* 64b: zero-ext */
		case 8:
			*c->dst.ptr = c->dst.val;
			break;
		}
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = ops->cmpxchg_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.orig_val,
					&c->dst.val,
					c->dst.bytes,
					&err,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.val,
					c->dst.bytes,
					&err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt,
				   (unsigned long)c->dst.ptr, err);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.ptr = (void *) register_address(c, ss_base(ctxt, ops),
					       c->regs[VCPU_REGS_RSP]);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
						       c->regs[VCPU_REGS_RSP]),
			   dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;

	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);

	emulate_push(ctxt, ops);
}
static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
	return rc;
}
static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

		emulate_push(ctxt, ops);

		rc = writeback(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	/* Disable writeback. */
	c->dst.type = OP_NONE;

	return rc;
}
static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
							c->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}
static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}
static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	default:
		return 0;
	}
	return 1;
}
static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt, ops);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt, ops);
		break;
	}
	return X86EMUL_CONTINUE;
}
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old = c->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
			(u32) c->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
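/*
 * This is the CMPXCHG8B core: if EDX:EAX matches the old 64-bit value,
 * ZF is set and ECX:EBX is written back via c->dst; otherwise ZF is
 * cleared and the old value is loaded into EDX:EAX.
 */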
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	return rc;
}
static inline void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops, struct desc_struct *cs,
			struct desc_struct *ss)
{
	memset(cs, 0, sizeof(struct desc_struct));
	ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}
static int
emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		emulate_ud(ctxt);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt->vcpu,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}
static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64) {
		emulate_ud(ctxt);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
		|| is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}
static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->eip = c->regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct desc_struct tr_seg;
	int r;
	u16 io_bitmap_ptr;
	u8 perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;

	ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
			  ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
			  &perm, 1, ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;

	return true;
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->ip = c->eip;
	tss->flag = ctxt->eflags;
	tss->ax = c->regs[VCPU_REGS_RAX];
	tss->cx = c->regs[VCPU_REGS_RCX];
	tss->dx = c->regs[VCPU_REGS_RDX];
	tss->bx = c->regs[VCPU_REGS_RBX];
	tss->sp = c->regs[VCPU_REGS_RSP];
	tss->bp = c->regs[VCPU_REGS_RBP];
	tss->si = c->regs[VCPU_REGS_RSI];
	tss->di = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	c->eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	c->regs[VCPU_REGS_RAX] = tss->ax;
	c->regs[VCPU_REGS_RCX] = tss->cx;
	c->regs[VCPU_REGS_RDX] = tss->dx;
	c->regs[VCPU_REGS_RBX] = tss->bx;
	c->regs[VCPU_REGS_RSP] = tss->sp;
	c->regs[VCPU_REGS_RBP] = tss->bp;
	c->regs[VCPU_REGS_RSI] = tss->si;
	c->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
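
/*
 * Illustrative sketch, not used by the emulator: the "| 2" in
 * load_state_from_tss32() above keeps bit 1 of EFLAGS set. The
 * architecture defines that bit as always reading as 1, so a stale or
 * hand-crafted TSS image must not be allowed to clear it:
 */
static inline u32 eflags_from_tss_image(u32 image)
{
	return image | 2;	/* bit 1 of EFLAGS is reserved, always 1 */
}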
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
	ulong old_tss_base =
		ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, ops, old_tss_sel,
					 &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags;
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, ops, tss_selector,
					 &next_tss_desc);
	}

	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);

	if (has_error_code) {
		struct decode_cache *c = &ctxt->decode;

		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		c->src.val = (unsigned long) error_code;
		emulate_push(ctxt, ops);
	}

	return ret;
}
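
/*
 * Illustrative sketch, not used by the emulator: the busy bit of a TSS
 * descriptor lives in bit 1 of the type field (type 1/9 = available
 * 16/32-bit TSS, type 3/11 = busy). The two write_segment_descriptor()
 * calls in emulator_do_task_switch() above toggle exactly that bit:
 */
static inline u8 tss_type_set_busy(u8 type)
{
	return type | (1 << 1);		/* e.g. 9 (avail) -> 11 (busy) */
}

static inline u8 tss_type_clear_busy(u8 type)
{
	return type & ~(1 << 1);	/* e.g. 11 (busy) -> 9 (avail) */
}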
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->eip = ctxt->eip;
	c->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		rc = writeback(ctxt, ops);
		if (rc == X86EMUL_CONTINUE)
			ctxt->eip = c->eip;
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
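
/*
 * Hypothetical usage sketch: a wrapper like this is not part of this
 * file, but shows how a caller emulating a far jmp through a task gate
 * might drive emulator_task_switch(); jmp-style switches never push an
 * error code, hence the false/0 pair.
 */
static inline int emulator_task_switch_jmp(struct x86_emulate_ctxt *ctxt,
					   struct x86_emulate_ops *ops,
					   u16 tss_selector)
{
	return emulator_task_switch(ctxt, ops, tss_selector,
				    TASK_SWITCH_JMP, false, 0);
}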
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]);
}
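
/*
 * Illustrative sketch, not used by the emulator: string_addr_inc() above
 * folds EFLAGS.DF into a signed stride. For a 4-byte movsd, DF = 0
 * advances RSI/RDI by +4 and DF = 1 (after std) advances them by -4:
 */
static inline int string_insn_stride(unsigned long eflags, int op_bytes)
{
	return (eflags & EFLG_DF) ? -op_bytes : op_bytes;
}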
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;

	ctxt->decode.mem_read.pos = 0;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		emulate_ud(ctxt);
		goto done;
	}

	/* Privileged instruction can be executed only in CPL=0 */
	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		goto done;
	}

	if (c->rep_prefix && (c->d & String)) {
		ctxt->restart = true;
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
		string_done:
			ctxt->restart = false;
			ctxt->eip = c->eip;
			goto done;
		}
		/* The second termination condition applies only to REPE
		 * and REPNE. If the repeat string operation prefix is
		 * REPE/REPZ or REPNE/REPNZ, test the corresponding
		 * termination condition:
		 *	- if REPE/REPZ and ZF = 0 then done
		 *	- if REPNE/REPNZ and ZF = 1 then done
		 */
		if ((c->b == 0xa6) || (c->b == 0xa7) ||
		    (c->b == 0xae) || (c->b == 0xaf)) {
			if ((c->rep_prefix == REPE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == 0))
				goto string_done;
			if ((c->rep_prefix == REPNE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
				goto string_done;
		}
		c->eip = ctxt->eip;
	}
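
	/*
	 * Worked example (illustration only): "repe cmpsb" with RCX == 3
	 * re-enters this function once per iteration. The first check
	 * above stops it when RCX has counted down to 0; the REPE check
	 * stops it as soon as a compared byte pair differs and ZF drops
	 * to 0, whichever happens first.
	 */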
	if (c->src.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, (unsigned long)c->src.ptr,
					c->src.valptr, c->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val64 = c->src.val64;
	}

	if (c->src2.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, (unsigned long)c->src2.ptr,
					&c->src2.val, c->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = read_emulated(ctxt, ops, (unsigned long)c->dst.ptr,
				   &c->dst.val, c->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;

special_insn:

	if (c->twobyte)
		goto twobyte_insn;

	switch (c->b) {
	case 0x00 ... 0x05:
	      add:		/* add */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		break;
	case 0x06:		/* push es */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0x07:		/* pop es */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x08 ... 0x0d:
	      or:		/* or */
		emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
		break;
	case 0x0e:		/* push cs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
		break;
	case 0x10 ... 0x15:
	      adc:		/* adc */
		emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
		break;
	case 0x16:		/* push ss */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0x17:		/* pop ss */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x18 ... 0x1d:
	      sbb:		/* sbb */
		emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
		break;
	case 0x1e:		/* push ds */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0x1f:		/* pop ds */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x20 ... 0x25:
	      and:		/* and */
		emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
		break;
	case 0x28 ... 0x2d:
	      sub:		/* sub */
		emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
		break;
	case 0x30 ... 0x35:
	      xor:		/* xor */
		emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
		break;
	case 0x38 ... 0x3d:
	      cmp:		/* cmp */
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		break;
	case 0x40 ... 0x47: /* inc r16/r32 */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 0x48 ... 0x4f: /* dec r16/r32 */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 0x50 ... 0x57:  /* push reg */
		emulate_push(ctxt, ops);
		break;
	case 0x58 ... 0x5f: /* pop reg */
	pop_instruction:
		rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x60:	/* pusha */
		rc = emulate_pusha(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x61:	/* popa */
		rc = emulate_popa(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		c->dst.val = (s32) c->src.val;
		break;
	case 0x68: /* push imm */
	case 0x6a: /* push imm8 */
		emulate_push(ctxt, ops);
		break;
	case 0x6c:		/* insb */
	case 0x6d:		/* insw/insd */
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
					  c->dst.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes,
				     c->regs[VCPU_REGS_RDX], &c->dst.val))
			goto done; /* IO is needed, skip writeback */
		break;
	case 0x6e:		/* outsb */
	case 0x6f:		/* outsw/outsd */
		c->src.bytes = min(c->src.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
					  c->src.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX],
				      &c->src.val, 1, ctxt->vcpu);

		c->dst.type = OP_NONE; /* nothing to writeback */
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x80 ... 0x83:	/* Grp1 */
		switch (c->modrm_reg) {
		case 0:
			goto add;
		case 1:
			goto or;
		case 2:
			goto adc;
		case 3:
			goto sbb;
		case 4:
			goto and;
		case 5:
			goto sub;
		case 6:
			goto xor;
		case 7:
			goto cmp;
		}
		break;
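	/*
	 * Worked example (illustration only): for the Grp1 opcodes the
	 * ModRM reg field selects the ALU op dispatched above, e.g.
	 * "80 /7 ib" (modrm_reg == 7) lands on the cmp label, so an
	 * immediate-form "cmpb $1, (mem)" shares one implementation
	 * with the register-form compares.
	 */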
	case 0x84 ... 0x85:
	test:
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 0x86 ... 0x87:	/* xchg */
	xchg:
		/* Write back the register source. */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *) c->src.ptr = (u8) c->dst.val;
			break;
		case 2:
			*(u16 *) c->src.ptr = (u16) c->dst.val;
			break;
		case 4:
			*c->src.ptr = (u32) c->dst.val;
			break;	/* 64b reg: zero-extend */
		case 8:
			*c->src.ptr = c->dst.val;
			break;
		}
		/*
		 * Write back the memory destination with implicit LOCK
		 * prefix.
		 */
		c->dst.val = c->src.val;
		c->lock_prefix = 1;
		break;
	case 0x88 ... 0x8b:	/* mov */
		goto mov;
	case 0x8c:  /* mov r/m, sreg */
		if (c->modrm_reg > VCPU_SREG_GS) {
			emulate_ud(ctxt);
			goto done;
		}
		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x8d: /* lea r16/r32, m */
		c->dst.val = c->modrm_ea;
		break;
	case 0x8e: { /* mov seg, r/m16 */
		u16 sel;

		sel = c->src.val;

		if (c->modrm_reg == VCPU_SREG_CS ||
		    c->modrm_reg > VCPU_SREG_GS) {
			emulate_ud(ctxt);
			goto done;
		}

		if (c->modrm_reg == VCPU_SREG_SS)
			ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);

		c->dst.type = OP_NONE;  /* Disable writeback. */
		break;
	}
	case 0x8f:		/* pop (sole member of Grp1a) */
		rc = emulate_grp1a(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x90: /* nop / xchg r8,rax */
		if (c->dst.ptr == (unsigned long *)&c->regs[VCPU_REGS_RAX]) {
			c->dst.type = OP_NONE;  /* nop */
			break;
		}
	case 0x91 ... 0x97: /* xchg reg,rax */
		c->src.type = OP_REG;
		c->src.bytes = c->op_bytes;
		c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX];
		c->src.val = *(c->src.ptr);
		goto xchg;
	case 0x9c: /* pushf */
		c->src.val = (unsigned long) ctxt->eflags;
		emulate_push(ctxt, ops);
		break;
	case 0x9d: /* popf */
		c->dst.type = OP_REG;
		c->dst.ptr = (unsigned long *) &ctxt->eflags;
		c->dst.bytes = c->op_bytes;
		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa0 ... 0xa3:	/* mov */
	case 0xa4 ... 0xa5:	/* movs */
		goto mov;
	case 0xa6 ... 0xa7:	/* cmps */
		c->dst.type = OP_NONE; /* Disable writeback. */
		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
		goto cmp;
	case 0xa8 ... 0xa9:	/* test ax, imm */
		goto test;
	case 0xaa ... 0xab:	/* stos */
		c->dst.val = c->regs[VCPU_REGS_RAX];
		break;
	case 0xac ... 0xad:	/* lods */
		goto mov;
	case 0xae ... 0xaf:	/* scas */
		DPRINTF("Urk! I don't handle SCAS.\n");
		goto cannot_emulate;
	case 0xb0 ... 0xbf: /* mov r, imm */
		goto mov;
	case 0xc0 ... 0xc1:
		emulate_grp2(ctxt);
		break;
	case 0xc3: /* ret */
		c->dst.type = OP_REG;
		c->dst.ptr = &c->eip;
		c->dst.bytes = c->op_bytes;
		goto pop_instruction;
	case 0xc6 ... 0xc7:	/* mov (sole member of Grp11) */
	mov:
		c->dst.val = c->src.val;
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		c->src.val = 1;
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe4:	/* inb */
	case 0xe5:	/* in */
		goto do_io_in;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt, ops);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: { /* jmp far */
		unsigned short sel;
	jump_far:
		memcpy(&sel, c->src.valptr + c->op_bytes, 2);

		if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
			goto done;

		c->eip = 0;
		memcpy(&c->eip, c->src.valptr, c->op_bytes);
		break;
	}
	case 0xeb:
	      jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out dx,al */
	case 0xef: /* out dx,(e/r)ax */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1,
				      ctxt->vcpu);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
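	/*
	 * Note (illustration only): every port I/O path above funnels
	 * through emulator_io_permited(), which applies the usual x86
	 * rule: when CPL exceeds IOPL (or in virtual-8086 mode) the
	 * port must be enabled in the TSS I/O permission bitmap,
	 * otherwise the access takes the emulate_gp(ctxt, 0) exit seen
	 * in each case.
	 */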
	case 0xf4:              /* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		if (!emulate_grp3(ctxt, ops))
			goto cannot_emulate;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else {
			ctxt->eflags &= ~X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else {
			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
			ctxt->eflags |= X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	default:
		goto cannot_emulate;
	}

writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	c->dst.type = saved_dst_type;

	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
				VCPU_REGS_RSI, &c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
				&c->dst);

	if (c->rep_prefix && (c->d & String)) {
		struct read_cache *rc = &ctxt->decode.io_read;
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		/*
		 * Re-enter guest when pio read ahead buffer is empty or,
		 * if it is not used, after each 1024 iteration.
		 */
		if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
		    (rc->end != 0 && rc->end == rc->pos))
			ctxt->restart = false;
	}
	/*
	 * reset read cache here in case string instruction is restarted
	 * without decoding
	 */
	ctxt->decode.mem_read.end = 0;
	ctxt->eip = c->eip;

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
			u16 size;
			unsigned long address;

		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.ptr,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					if (rc != X86EMUL_CONTINUE)
						goto done;
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.ptr,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			emulate_ud(ctxt);
			goto done;
		case 7: /* invlpg*/
			emulate_invlpg(ctxt->vcpu, c->modrm_ea);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05:	/* syscall */
		rc = emulate_syscall(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x06:
		emulate_clts(ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x09:		/* wbinvd */
		kvm_emulate_wbinvd(ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		c->dst.type = OP_NONE;
		break;
	case 0x20: /* mov cr, reg */
		switch (c->modrm_reg) {
		case 1:
		case 5 ... 7:
		case 9 ... 15:
			emulate_ud(ctxt);
			goto done;
		}
		c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x21: /* mov from dr to reg */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			goto done;
		}
		ops->get_dr(c->modrm_reg, &c->regs[c->modrm_rm], ctxt->vcpu);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x22: /* mov reg, cr */
		if (ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			goto done;
		}

		if (ops->set_dr(c->modrm_reg, c->regs[c->modrm_rm] &
				((ctxt->mode == X86EMUL_MODE_PROT64) ?
				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
			/* #UD condition is already handled by the code above */
			emulate_gp(ctxt, 0);
			goto done;
		}

		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x32:
		/* rdmsr */
		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
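		/*
		 * Worked example (illustration only): for msr_data ==
		 * 0x123456789abcdef0 the rdmsr split above leaves
		 * RAX == 0x9abcdef0 and RDX == 0x12345678; the wrmsr
		 * case rebuilds the same value via
		 * (u32)RAX | ((u64)RDX << 32).
		 */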
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE;
		break;
	case 0xa0:	  /* push fs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa1:	 /* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa3:
	      bt:		/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:	/* push gs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xab:
	      bts:		/* bts */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
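	/*
	 * Worked example (illustration only): cmpxchg above compares
	 * RAX with the destination. If they match (ZF set) the saved
	 * source value is written back to the destination; otherwise
	 * the destination's current value lands in RAX, so a retry
	 * loop restarts from the freshly observed value.
	 */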
	case 0xb3:
	      btr:		/* btr */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
						       : (u16) c->src.val;
		break;
	case 0xba:		/* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	      btc:		/* btc */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
							(s16) c->src.val;
		break;
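	/*
	 * Worked example (illustration only): with src.val == 0x80,
	 * movzx above yields 0x0000000000000080 while movsx yields
	 * 0xffffffffffffff80; the (u8)/(u16) versus (s8)/(s16) casts
	 * select zero- versus sign-extension before the common
	 * widening assignment to dst.val.
	 */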
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
							(u64) c->src.val;
		break;
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	default:
		goto cannot_emulate;
	}
	goto writeback;

cannot_emulate:
	DPRINTF("Cannot emulate %02x\n", c->b);
	return -1;
}