KVM: x86 emulator: switch src decode to decode_operand()
1 /******************************************************************************
2 * emulate.c
3 *
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5 *
6 * Copyright (c) 2005 Keir Fraser
7 *
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
10 *
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
13 *
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
16 *
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
19 *
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
21 */
22
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
27
28 #include "x86.h"
29 #include "tss.h"
30
31 /*
32 * Operand types
33 */
34 #define OpNone 0ull
35 #define OpImplicit 1ull /* No generic decode */
36 #define OpReg 2ull /* Register */
37 #define OpMem 3ull /* Memory */
38 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
39 #define OpDI 5ull /* ES:DI/EDI/RDI */
40 #define OpMem64 6ull /* Memory, 64-bit */
41 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
42 #define OpDX 8ull /* DX register */
43 #define OpCL 9ull /* CL register (for shifts) */
44 #define OpImmByte 10ull /* 8-bit sign extended immediate */
45 #define OpOne 11ull /* Implied 1 */
46 #define OpImm 12ull /* Sign extended immediate */
47 #define OpMem16 13ull /* Memory operand (16-bit). */
48 #define OpMem32 14ull /* Memory operand (32-bit). */
49 #define OpImmU 15ull /* Immediate operand, zero extended */
50 #define OpSI 16ull /* SI/ESI/RSI */
51 #define OpImmFAddr 17ull /* Immediate far address */
52 #define OpMemFAddr 18ull /* Far address in memory */
53 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
54
55 #define OpBits 5 /* Width of operand field */
56 #define OpMask ((1ull << OpBits) - 1)
57
58 /*
59 * Opcode effective-address decode tables.
60 * Note that we only emulate instructions that have at least one memory
61 * operand (excluding implicit stack references). We assume that stack
62 * references and instruction fetches will never occur in special memory
63 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
64 * not be handled.
65 */
66
67 /* Operand sizes: 8-bit operands or specified/overridden size. */
68 #define ByteOp (1<<0) /* 8-bit operands. */
69 /* Destination operand type. */
70 #define DstShift 1
71 #define ImplicitOps (OpImplicit << DstShift)
72 #define DstReg (OpReg << DstShift)
73 #define DstMem (OpMem << DstShift)
74 #define DstAcc (OpAcc << DstShift)
75 #define DstDI (OpDI << DstShift)
76 #define DstMem64 (OpMem64 << DstShift)
77 #define DstImmUByte (OpImmUByte << DstShift)
78 #define DstDX (OpDX << DstShift)
79 #define DstMask (OpMask << DstShift)
80 /* Source operand type. */
81 #define SrcShift 6
82 #define SrcNone (OpNone << SrcShift)
83 #define SrcReg (OpReg << SrcShift)
84 #define SrcMem (OpMem << SrcShift)
85 #define SrcMem16 (OpMem16 << SrcShift)
86 #define SrcMem32 (OpMem32 << SrcShift)
87 #define SrcImm (OpImm << SrcShift)
88 #define SrcImmByte (OpImmByte << SrcShift)
89 #define SrcOne (OpOne << SrcShift)
90 #define SrcImmUByte (OpImmUByte << SrcShift)
91 #define SrcImmU (OpImmU << SrcShift)
92 #define SrcSI (OpSI << SrcShift)
93 #define SrcImmFAddr (OpImmFAddr << SrcShift)
94 #define SrcMemFAddr (OpMemFAddr << SrcShift)
95 #define SrcAcc (OpAcc << SrcShift)
96 #define SrcImmU16 (OpImmU16 << SrcShift)
97 #define SrcDX (OpDX << SrcShift)
98 #define SrcMask (OpMask << SrcShift)
99 #define BitOp (1<<11)
100 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
101 #define String (1<<13) /* String instruction (rep capable) */
102 #define Stack (1<<14) /* Stack instruction (push/pop) */
103 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
104 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
105 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
106 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
107 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
108 #define Sse (1<<18) /* SSE Vector instruction */
109 /* Generic ModRM decode. */
110 #define ModRM (1<<19)
111 /* Destination is only written; never read. */
112 #define Mov (1<<20)
113 /* Misc flags */
114 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
115 #define VendorSpecific (1<<22) /* Vendor specific instruction */
116 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
117 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
118 #define Undefined (1<<25) /* No Such Instruction */
119 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
120 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
121 #define No64 (1<<28)
122 /* Source 2 operand type */
123 #define Src2Shift (29)
124 #define Src2None (OpNone << Src2Shift)
125 #define Src2CL (OpCL << Src2Shift)
126 #define Src2ImmByte (OpImmByte << Src2Shift)
127 #define Src2One (OpOne << Src2Shift)
128 #define Src2Imm (OpImm << Src2Shift)
129 #define Src2Mask (OpMask << Src2Shift)
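/*
 * Encoding example: an opcode declared DstMem | SrcReg | ModRM packs
 * OpMem into bits 1-5 of the flags field and OpReg into bits 6-10, so
 * a generic operand decoder (e.g. decode_operand()) can recover each
 * operand type with (flags >> DstShift) & OpMask and
 * (flags >> SrcShift) & OpMask.
 */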
130
131 #define X2(x...) x, x
132 #define X3(x...) X2(x), x
133 #define X4(x...) X2(x), X2(x)
134 #define X5(x...) X4(x), x
135 #define X6(x...) X4(x), X2(x)
136 #define X7(x...) X4(x), X3(x)
137 #define X8(x...) X4(x), X4(x)
138 #define X16(x...) X8(x), X8(x)
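/*
 * Example expansion: X4(0x90) yields "0x90, 0x90, 0x90, 0x90", so
 * X16(...) fills sixteen consecutive slots of an opcode table with a
 * single initializer.
 */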
139
140 struct opcode {
141 u64 flags : 56;
142 u64 intercept : 8;
143 union {
144 int (*execute)(struct x86_emulate_ctxt *ctxt);
145 struct opcode *group;
146 struct group_dual *gdual;
147 struct gprefix *gprefix;
148 } u;
149 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
150 };
151
152 struct group_dual {
153 struct opcode mod012[8];
154 struct opcode mod3[8];
155 };
156
157 struct gprefix {
158 struct opcode pfx_no;
159 struct opcode pfx_66;
160 struct opcode pfx_f2;
161 struct opcode pfx_f3;
162 };
163
164 /* EFLAGS bit definitions. */
165 #define EFLG_ID (1<<21)
166 #define EFLG_VIP (1<<20)
167 #define EFLG_VIF (1<<19)
168 #define EFLG_AC (1<<18)
169 #define EFLG_VM (1<<17)
170 #define EFLG_RF (1<<16)
171 #define EFLG_IOPL (3<<12)
172 #define EFLG_NT (1<<14)
173 #define EFLG_OF (1<<11)
174 #define EFLG_DF (1<<10)
175 #define EFLG_IF (1<<9)
176 #define EFLG_TF (1<<8)
177 #define EFLG_SF (1<<7)
178 #define EFLG_ZF (1<<6)
179 #define EFLG_AF (1<<4)
180 #define EFLG_PF (1<<2)
181 #define EFLG_CF (1<<0)
182
183 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
184 #define EFLG_RESERVED_ONE_MASK 2
185
186 /*
187 * Instruction emulation:
188 * Most instructions are emulated directly via a fragment of inline assembly
189 * code. This allows us to save/restore EFLAGS and thus very easily pick up
190 * any modified flags.
191 */
192
193 #if defined(CONFIG_X86_64)
194 #define _LO32 "k" /* force 32-bit operand */
195 #define _STK "%%rsp" /* stack pointer */
196 #elif defined(__i386__)
197 #define _LO32 "" /* force 32-bit operand */
198 #define _STK "%%esp" /* stack pointer */
199 #endif
200
201 /*
202 * These EFLAGS bits are restored from saved value during emulation, and
203 * any changes are written back to the saved value after emulation.
204 */
205 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
206
207 /* Before executing instruction: restore necessary bits in EFLAGS. */
208 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
209 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
210 "movl %"_sav",%"_LO32 _tmp"; " \
211 "push %"_tmp"; " \
212 "push %"_tmp"; " \
213 "movl %"_msk",%"_LO32 _tmp"; " \
214 "andl %"_LO32 _tmp",("_STK"); " \
215 "pushf; " \
216 "notl %"_LO32 _tmp"; " \
217 "andl %"_LO32 _tmp",("_STK"); " \
218 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
219 "pop %"_tmp"; " \
220 "orl %"_LO32 _tmp",("_STK"); " \
221 "popf; " \
222 "pop %"_sav"; "
223
224 /* After executing instruction: write-back necessary bits in EFLAGS. */
225 #define _POST_EFLAGS(_sav, _msk, _tmp) \
226 /* _sav |= EFLAGS & _msk; */ \
227 "pushf; " \
228 "pop %"_tmp"; " \
229 "andl %"_msk",%"_LO32 _tmp"; " \
230 "orl %"_LO32 _tmp",%"_sav"; "
231
232 #ifdef CONFIG_X86_64
233 #define ON64(x) x
234 #else
235 #define ON64(x)
236 #endif
237
238 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
239 do { \
240 __asm__ __volatile__ ( \
241 _PRE_EFLAGS("0", "4", "2") \
242 _op _suffix " %"_x"3,%1; " \
243 _POST_EFLAGS("0", "4", "2") \
244 : "=m" ((ctxt)->eflags), \
245 "+q" (*(_dsttype*)&(ctxt)->dst.val), \
246 "=&r" (_tmp) \
247 : _y ((ctxt)->src.val), "i" (EFLAGS_MASK)); \
248 } while (0)
249
250
251 /* Raw emulation: instruction has two explicit operands. */
252 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
253 do { \
254 unsigned long _tmp; \
255 \
256 switch ((ctxt)->dst.bytes) { \
257 case 2: \
258 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
259 break; \
260 case 4: \
261 ____emulate_2op(ctxt,_op,_lx,_ly,"l",u32); \
262 break; \
263 case 8: \
264 ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
265 break; \
266 } \
267 } while (0)
268
269 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
270 do { \
271 unsigned long _tmp; \
272 switch ((ctxt)->dst.bytes) { \
273 case 1: \
274 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
275 break; \
276 default: \
277 __emulate_2op_nobyte(ctxt, _op, \
278 _wx, _wy, _lx, _ly, _qx, _qy); \
279 break; \
280 } \
281 } while (0)
282
283 /* Source operand is byte-sized and may be restricted to just %cl. */
284 #define emulate_2op_SrcB(ctxt, _op) \
285 __emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")
286
287 /* Source operand is byte, word, long or quad sized. */
288 #define emulate_2op_SrcV(ctxt, _op) \
289 __emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")
290
291 /* Source operand is word, long or quad sized. */
292 #define emulate_2op_SrcV_nobyte(ctxt, _op) \
293 __emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
294
295 /* Instruction has three operands and one operand is stored in the CL register */
296 #define __emulate_2op_cl(ctxt, _op, _suffix, _type) \
297 do { \
298 unsigned long _tmp; \
299 _type _clv = (ctxt)->src2.val; \
300 _type _srcv = (ctxt)->src.val; \
301 _type _dstv = (ctxt)->dst.val; \
302 \
303 __asm__ __volatile__ ( \
304 _PRE_EFLAGS("0", "5", "2") \
305 _op _suffix " %4,%1 \n" \
306 _POST_EFLAGS("0", "5", "2") \
307 : "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
308 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
309 ); \
310 \
311 (ctxt)->src2.val = (unsigned long) _clv; \
312 (ctxt)->src.val = (unsigned long) _srcv; \
313 (ctxt)->dst.val = (unsigned long) _dstv; \
314 } while (0)
315
316 #define emulate_2op_cl(ctxt, _op) \
317 do { \
318 switch ((ctxt)->dst.bytes) { \
319 case 2: \
320 __emulate_2op_cl(ctxt, _op, "w", u16); \
321 break; \
322 case 4: \
323 __emulate_2op_cl(ctxt, _op, "l", u32); \
324 break; \
325 case 8: \
326 ON64(__emulate_2op_cl(ctxt, _op, "q", ulong)); \
327 break; \
328 } \
329 } while (0)
330
331 #define __emulate_1op(ctxt, _op, _suffix) \
332 do { \
333 unsigned long _tmp; \
334 \
335 __asm__ __volatile__ ( \
336 _PRE_EFLAGS("0", "3", "2") \
337 _op _suffix " %1; " \
338 _POST_EFLAGS("0", "3", "2") \
339 : "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
340 "=&r" (_tmp) \
341 : "i" (EFLAGS_MASK)); \
342 } while (0)
343
344 /* Instruction has only one explicit operand (no source operand). */
345 #define emulate_1op(ctxt, _op) \
346 do { \
347 switch ((ctxt)->dst.bytes) { \
348 case 1: __emulate_1op(ctxt, _op, "b"); break; \
349 case 2: __emulate_1op(ctxt, _op, "w"); break; \
350 case 4: __emulate_1op(ctxt, _op, "l"); break; \
351 case 8: ON64(__emulate_1op(ctxt, _op, "q")); break; \
352 } \
353 } while (0)
354
355 #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
356 do { \
357 unsigned long _tmp; \
358 ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX]; \
359 ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX]; \
360 \
361 __asm__ __volatile__ ( \
362 _PRE_EFLAGS("0", "5", "1") \
363 "1: \n\t" \
364 _op _suffix " %6; " \
365 "2: \n\t" \
366 _POST_EFLAGS("0", "5", "1") \
367 ".pushsection .fixup,\"ax\" \n\t" \
368 "3: movb $1, %4 \n\t" \
369 "jmp 2b \n\t" \
370 ".popsection \n\t" \
371 _ASM_EXTABLE(1b, 3b) \
372 : "=m" ((ctxt)->eflags), "=&r" (_tmp), \
373 "+a" (*rax), "+d" (*rdx), "+qm"(_ex) \
374 : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val), \
375 "a" (*rax), "d" (*rdx)); \
376 } while (0)
377
378 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
379 #define emulate_1op_rax_rdx(ctxt, _op, _ex) \
380 do { \
381 switch((ctxt)->src.bytes) { \
382 case 1: \
383 __emulate_1op_rax_rdx(ctxt, _op, "b", _ex); \
384 break; \
385 case 2: \
386 __emulate_1op_rax_rdx(ctxt, _op, "w", _ex); \
387 break; \
388 case 4: \
389 __emulate_1op_rax_rdx(ctxt, _op, "l", _ex); \
390 break; \
391 case 8: ON64( \
392 __emulate_1op_rax_rdx(ctxt, _op, "q", _ex)); \
393 break; \
394 } \
395 } while (0)
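/*
 * Example: a "div" with a zero divisor faults inside the asm block; the
 * exception-table entry redirects execution to the fixup at 3:, which
 * sets _ex to 1, so callers such as em_div_ex() below can inject #DE
 * into the guest instead of faulting in the host.
 */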
396
397 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
398 enum x86_intercept intercept,
399 enum x86_intercept_stage stage)
400 {
401 struct x86_instruction_info info = {
402 .intercept = intercept,
403 .rep_prefix = ctxt->rep_prefix,
404 .modrm_mod = ctxt->modrm_mod,
405 .modrm_reg = ctxt->modrm_reg,
406 .modrm_rm = ctxt->modrm_rm,
407 .src_val = ctxt->src.val64,
408 .src_bytes = ctxt->src.bytes,
409 .dst_bytes = ctxt->dst.bytes,
410 .ad_bytes = ctxt->ad_bytes,
411 .next_rip = ctxt->eip,
412 };
413
414 return ctxt->ops->intercept(ctxt, &info, stage);
415 }
416
417 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
418 {
419 return (1UL << (ctxt->ad_bytes << 3)) - 1;
420 }
421
422 /* Access/update address held in a register, based on addressing mode. */
423 static inline unsigned long
424 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
425 {
426 if (ctxt->ad_bytes == sizeof(unsigned long))
427 return reg;
428 else
429 return reg & ad_mask(ctxt);
430 }
431
432 static inline unsigned long
433 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
434 {
435 return address_mask(ctxt, reg);
436 }
437
438 static inline void
439 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
440 {
441 if (ctxt->ad_bytes == sizeof(unsigned long))
442 *reg += inc;
443 else
444 *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
445 }
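/*
 * Example: with ctxt->ad_bytes == 2, ad_mask() is 0xffff, so
 * incrementing a register holding 0x1ffff by 1 yields 0x10000: only
 * the low 16 bits wrap, as 16-bit addressing requires.
 */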
446
447 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
448 {
449 register_address_increment(ctxt, &ctxt->_eip, rel);
450 }
451
452 static u32 desc_limit_scaled(struct desc_struct *desc)
453 {
454 u32 limit = get_desc_limit(desc);
455
456 return desc->g ? (limit << 12) | 0xfff : limit;
457 }
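/*
 * Example: a descriptor with g == 1 and limit == 0xfffff scales to
 * (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a flat 4 GiB segment.
 */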
458
459 static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
460 {
461 ctxt->has_seg_override = true;
462 ctxt->seg_override = seg;
463 }
464
465 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
466 {
467 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
468 return 0;
469
470 return ctxt->ops->get_cached_segment_base(ctxt, seg);
471 }
472
473 static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
474 {
475 if (!ctxt->has_seg_override)
476 return 0;
477
478 return ctxt->seg_override;
479 }
480
481 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
482 u32 error, bool valid)
483 {
484 ctxt->exception.vector = vec;
485 ctxt->exception.error_code = error;
486 ctxt->exception.error_code_valid = valid;
487 return X86EMUL_PROPAGATE_FAULT;
488 }
489
490 static int emulate_db(struct x86_emulate_ctxt *ctxt)
491 {
492 return emulate_exception(ctxt, DB_VECTOR, 0, false);
493 }
494
495 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
496 {
497 return emulate_exception(ctxt, GP_VECTOR, err, true);
498 }
499
500 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
501 {
502 return emulate_exception(ctxt, SS_VECTOR, err, true);
503 }
504
505 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
506 {
507 return emulate_exception(ctxt, UD_VECTOR, 0, false);
508 }
509
510 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
511 {
512 return emulate_exception(ctxt, TS_VECTOR, err, true);
513 }
514
515 static int emulate_de(struct x86_emulate_ctxt *ctxt)
516 {
517 return emulate_exception(ctxt, DE_VECTOR, 0, false);
518 }
519
520 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
521 {
522 return emulate_exception(ctxt, NM_VECTOR, 0, false);
523 }
524
525 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
526 {
527 u16 selector;
528 struct desc_struct desc;
529
530 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
531 return selector;
532 }
533
534 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
535 unsigned seg)
536 {
537 u16 dummy;
538 u32 base3;
539 struct desc_struct desc;
540
541 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
542 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
543 }
544
545 static int __linearize(struct x86_emulate_ctxt *ctxt,
546 struct segmented_address addr,
547 unsigned size, bool write, bool fetch,
548 ulong *linear)
549 {
550 struct desc_struct desc;
551 bool usable;
552 ulong la;
553 u32 lim;
554 u16 sel;
555 unsigned cpl, rpl;
556
557 la = seg_base(ctxt, addr.seg) + addr.ea;
558 switch (ctxt->mode) {
559 case X86EMUL_MODE_REAL:
560 break;
561 case X86EMUL_MODE_PROT64:
562 if (((signed long)la << 16) >> 16 != la)
563 return emulate_gp(ctxt, 0);
564 break;
565 default:
566 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
567 addr.seg);
568 if (!usable)
569 goto bad;
570 /* code segment or read-only data segment */
571 if (((desc.type & 8) || !(desc.type & 2)) && write)
572 goto bad;
573 /* unreadable code segment */
574 if (!fetch && (desc.type & 8) && !(desc.type & 2))
575 goto bad;
576 lim = desc_limit_scaled(&desc);
577 if ((desc.type & 8) || !(desc.type & 4)) {
578 /* expand-up segment */
579 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
580 goto bad;
581 } else {
582 /* expand-down segment */
583 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
584 goto bad;
585 lim = desc.d ? 0xffffffff : 0xffff;
586 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
587 goto bad;
588 }
589 cpl = ctxt->ops->cpl(ctxt);
590 rpl = sel & 3;
591 cpl = max(cpl, rpl);
592 if (!(desc.type & 8)) {
593 /* data segment */
594 if (cpl > desc.dpl)
595 goto bad;
596 } else if ((desc.type & 8) && !(desc.type & 4)) {
597 /* nonconforming code segment */
598 if (cpl != desc.dpl)
599 goto bad;
600 } else if ((desc.type & 8) && (desc.type & 4)) {
601 /* conforming code segment */
602 if (cpl < desc.dpl)
603 goto bad;
604 }
605 break;
606 }
607 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
608 la &= (u32)-1;
609 *linear = la;
610 return X86EMUL_CONTINUE;
611 bad:
612 if (addr.seg == VCPU_SREG_SS)
613 return emulate_ss(ctxt, addr.seg);
614 else
615 return emulate_gp(ctxt, addr.seg);
616 }
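/*
 * Example: in 64-bit mode the sign-extension check above rejects
 * la == 0x0000800000000000: shifting left then right by 16 yields
 * 0xffff800000000000 != la, so the address is non-canonical and
 * #GP(0) is raised.
 */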
617
618 static int linearize(struct x86_emulate_ctxt *ctxt,
619 struct segmented_address addr,
620 unsigned size, bool write,
621 ulong *linear)
622 {
623 return __linearize(ctxt, addr, size, write, false, linear);
624 }
625
626
627 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
628 struct segmented_address addr,
629 void *data,
630 unsigned size)
631 {
632 int rc;
633 ulong linear;
634
635 rc = linearize(ctxt, addr, size, false, &linear);
636 if (rc != X86EMUL_CONTINUE)
637 return rc;
638 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
639 }
640
641 /*
642 * Fetch the next byte of the instruction being emulated which is pointed to
643 * by ctxt->_eip, then increment ctxt->_eip.
644 *
645 * Also prefetch the remaining bytes of the instruction without crossing page
646 * boundary if they are not in fetch_cache yet.
647 */
648 static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
649 {
650 struct fetch_cache *fc = &ctxt->fetch;
651 int rc;
652 int size, cur_size;
653
654 if (ctxt->_eip == fc->end) {
655 unsigned long linear;
656 struct segmented_address addr = { .seg = VCPU_SREG_CS,
657 .ea = ctxt->_eip };
658 cur_size = fc->end - fc->start;
659 size = min(15UL - cur_size,
660 PAGE_SIZE - offset_in_page(ctxt->_eip));
661 rc = __linearize(ctxt, addr, size, false, true, &linear);
662 if (unlikely(rc != X86EMUL_CONTINUE))
663 return rc;
664 rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
665 size, &ctxt->exception);
666 if (unlikely(rc != X86EMUL_CONTINUE))
667 return rc;
668 fc->end += size;
669 }
670 *dest = fc->data[ctxt->_eip - fc->start];
671 ctxt->_eip++;
672 return X86EMUL_CONTINUE;
673 }
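/*
 * Example: an instruction starting two bytes before a page boundary
 * prefetches only those two bytes on the first call; the next byte
 * triggers a fresh __linearize() + ->fetch() on the following page, so
 * a single prefetch never crosses a page boundary.
 */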
674
675 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
676 void *dest, unsigned size)
677 {
678 int rc;
679
680 /* x86 instructions are limited to 15 bytes. */
681 if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
682 return X86EMUL_UNHANDLEABLE;
683 while (size--) {
684 rc = do_insn_fetch_byte(ctxt, dest++);
685 if (rc != X86EMUL_CONTINUE)
686 return rc;
687 }
688 return X86EMUL_CONTINUE;
689 }
690
691 /* Fetch next part of the instruction being emulated. */
692 #define insn_fetch(_type, _ctxt) \
693 ({ unsigned long _x; \
694 rc = do_insn_fetch(_ctxt, &_x, sizeof(_type)); \
695 if (rc != X86EMUL_CONTINUE) \
696 goto done; \
697 (_type)_x; \
698 })
699
700 #define insn_fetch_arr(_arr, _size, _ctxt) \
701 ({ rc = do_insn_fetch(_ctxt, _arr, (_size)); \
702 if (rc != X86EMUL_CONTINUE) \
703 goto done; \
704 })
705
706 /*
707 * Given the 'reg' portion of a ModRM byte, and a register block, return a
708 * pointer into the block that addresses the relevant register.
709 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
710 */
711 static void *decode_register(u8 modrm_reg, unsigned long *regs,
712 int highbyte_regs)
713 {
714 void *p;
715
716 p = &regs[modrm_reg];
717 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
718 p = (unsigned char *)&regs[modrm_reg & 3] + 1;
719 return p;
720 }
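/*
 * Example: with highbyte_regs set, modrm_reg == 4 selects AH rather
 * than RSP: regs[4 & 3] is RAX, and the +1 points at its second byte
 * (little-endian), which is where AH lives.
 */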
721
722 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
723 struct segmented_address addr,
724 u16 *size, unsigned long *address, int op_bytes)
725 {
726 int rc;
727
728 if (op_bytes == 2)
729 op_bytes = 3;
730 *address = 0;
731 rc = segmented_read_std(ctxt, addr, size, 2);
732 if (rc != X86EMUL_CONTINUE)
733 return rc;
734 addr.ea += 2;
735 rc = segmented_read_std(ctxt, addr, address, op_bytes);
736 return rc;
737 }
738
739 static int test_cc(unsigned int condition, unsigned int flags)
740 {
741 int rc = 0;
742
743 switch ((condition & 15) >> 1) {
744 case 0: /* o */
745 rc |= (flags & EFLG_OF);
746 break;
747 case 1: /* b/c/nae */
748 rc |= (flags & EFLG_CF);
749 break;
750 case 2: /* z/e */
751 rc |= (flags & EFLG_ZF);
752 break;
753 case 3: /* be/na */
754 rc |= (flags & (EFLG_CF|EFLG_ZF));
755 break;
756 case 4: /* s */
757 rc |= (flags & EFLG_SF);
758 break;
759 case 5: /* p/pe */
760 rc |= (flags & EFLG_PF);
761 break;
762 case 7: /* le/ng */
763 rc |= (flags & EFLG_ZF);
764 /* fall through */
765 case 6: /* l/nge */
766 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
767 break;
768 }
769
770 /* Odd condition identifiers (lsb == 1) have inverted sense. */
771 return (!!rc ^ (condition & 1));
772 }
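/*
 * Example: "jne" encodes condition 0x5; (0x5 & 15) >> 1 == 2 tests ZF,
 * and the set low bit inverts the sense, so the branch is taken when
 * ZF == 0.
 */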
773
774 static void fetch_register_operand(struct operand *op)
775 {
776 switch (op->bytes) {
777 case 1:
778 op->val = *(u8 *)op->addr.reg;
779 break;
780 case 2:
781 op->val = *(u16 *)op->addr.reg;
782 break;
783 case 4:
784 op->val = *(u32 *)op->addr.reg;
785 break;
786 case 8:
787 op->val = *(u64 *)op->addr.reg;
788 break;
789 }
790 }
791
792 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
793 {
794 ctxt->ops->get_fpu(ctxt);
795 switch (reg) {
796 case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
797 case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
798 case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
799 case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
800 case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
801 case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
802 case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
803 case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
804 #ifdef CONFIG_X86_64
805 case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
806 case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
807 case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
808 case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
809 case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
810 case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
811 case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
812 case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
813 #endif
814 default: BUG();
815 }
816 ctxt->ops->put_fpu(ctxt);
817 }
818
819 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
820 int reg)
821 {
822 ctxt->ops->get_fpu(ctxt);
823 switch (reg) {
824 case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
825 case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
826 case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
827 case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
828 case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
829 case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
830 case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
831 case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
832 #ifdef CONFIG_X86_64
833 case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
834 case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
835 case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
836 case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
837 case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
838 case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
839 case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
840 case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
841 #endif
842 default: BUG();
843 }
844 ctxt->ops->put_fpu(ctxt);
845 }
846
847 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
848 struct operand *op,
849 int inhibit_bytereg)
850 {
851 unsigned reg = ctxt->modrm_reg;
852 int highbyte_regs = ctxt->rex_prefix == 0;
853
854 if (!(ctxt->d & ModRM))
855 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
856
857 if (ctxt->d & Sse) {
858 op->type = OP_XMM;
859 op->bytes = 16;
860 op->addr.xmm = reg;
861 read_sse_reg(ctxt, &op->vec_val, reg);
862 return;
863 }
864
865 op->type = OP_REG;
866 if ((ctxt->d & ByteOp) && !inhibit_bytereg) {
867 op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
868 op->bytes = 1;
869 } else {
870 op->addr.reg = decode_register(reg, ctxt->regs, 0);
871 op->bytes = ctxt->op_bytes;
872 }
873 fetch_register_operand(op);
874 op->orig_val = op->val;
875 }
876
877 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
878 struct operand *op)
879 {
880 u8 sib;
881 int index_reg = 0, base_reg = 0, scale;
882 int rc = X86EMUL_CONTINUE;
883 ulong modrm_ea = 0;
884
885 if (ctxt->rex_prefix) {
886 ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1; /* REX.R */
887 index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
888 ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
889 }
890
891 ctxt->modrm = insn_fetch(u8, ctxt);
892 ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
893 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
894 ctxt->modrm_rm |= (ctxt->modrm & 0x07);
895 ctxt->modrm_seg = VCPU_SREG_DS;
896
897 if (ctxt->modrm_mod == 3) {
898 op->type = OP_REG;
899 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
900 op->addr.reg = decode_register(ctxt->modrm_rm,
901 ctxt->regs, ctxt->d & ByteOp);
902 if (ctxt->d & Sse) {
903 op->type = OP_XMM;
904 op->bytes = 16;
905 op->addr.xmm = ctxt->modrm_rm;
906 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
907 return rc;
908 }
909 fetch_register_operand(op);
910 return rc;
911 }
912
913 op->type = OP_MEM;
914
915 if (ctxt->ad_bytes == 2) {
916 unsigned bx = ctxt->regs[VCPU_REGS_RBX];
917 unsigned bp = ctxt->regs[VCPU_REGS_RBP];
918 unsigned si = ctxt->regs[VCPU_REGS_RSI];
919 unsigned di = ctxt->regs[VCPU_REGS_RDI];
920
921 /* 16-bit ModR/M decode. */
922 switch (ctxt->modrm_mod) {
923 case 0:
924 if (ctxt->modrm_rm == 6)
925 modrm_ea += insn_fetch(u16, ctxt);
926 break;
927 case 1:
928 modrm_ea += insn_fetch(s8, ctxt);
929 break;
930 case 2:
931 modrm_ea += insn_fetch(u16, ctxt);
932 break;
933 }
934 switch (ctxt->modrm_rm) {
935 case 0:
936 modrm_ea += bx + si;
937 break;
938 case 1:
939 modrm_ea += bx + di;
940 break;
941 case 2:
942 modrm_ea += bp + si;
943 break;
944 case 3:
945 modrm_ea += bp + di;
946 break;
947 case 4:
948 modrm_ea += si;
949 break;
950 case 5:
951 modrm_ea += di;
952 break;
953 case 6:
954 if (ctxt->modrm_mod != 0)
955 modrm_ea += bp;
956 break;
957 case 7:
958 modrm_ea += bx;
959 break;
960 }
961 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
962 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
963 ctxt->modrm_seg = VCPU_SREG_SS;
964 modrm_ea = (u16)modrm_ea;
965 } else {
966 /* 32/64-bit ModR/M decode. */
967 if ((ctxt->modrm_rm & 7) == 4) {
968 sib = insn_fetch(u8, ctxt);
969 index_reg |= (sib >> 3) & 7;
970 base_reg |= sib & 7;
971 scale = sib >> 6;
972
973 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
974 modrm_ea += insn_fetch(s32, ctxt);
975 else
976 modrm_ea += ctxt->regs[base_reg];
977 if (index_reg != 4)
978 modrm_ea += ctxt->regs[index_reg] << scale;
979 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
980 if (ctxt->mode == X86EMUL_MODE_PROT64)
981 ctxt->rip_relative = 1;
982 } else
983 modrm_ea += ctxt->regs[ctxt->modrm_rm];
984 switch (ctxt->modrm_mod) {
985 case 0:
986 if (ctxt->modrm_rm == 5)
987 modrm_ea += insn_fetch(s32, ctxt);
988 break;
989 case 1:
990 modrm_ea += insn_fetch(s8, ctxt);
991 break;
992 case 2:
993 modrm_ea += insn_fetch(s32, ctxt);
994 break;
995 }
996 }
997 op->addr.mem.ea = modrm_ea;
998 done:
999 return rc;
1000 }
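/*
 * Worked example: in 16-bit addressing, modrm == 0x42 (mod 1, reg 0,
 * rm 2) followed by a displacement byte of 0x10 decodes to the
 * effective address bp + si + 0x10, with an implicit SS segment per
 * the rm == 2 case above.
 */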
1001
1002 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1003 struct operand *op)
1004 {
1005 int rc = X86EMUL_CONTINUE;
1006
1007 op->type = OP_MEM;
1008 switch (ctxt->ad_bytes) {
1009 case 2:
1010 op->addr.mem.ea = insn_fetch(u16, ctxt);
1011 break;
1012 case 4:
1013 op->addr.mem.ea = insn_fetch(u32, ctxt);
1014 break;
1015 case 8:
1016 op->addr.mem.ea = insn_fetch(u64, ctxt);
1017 break;
1018 }
1019 done:
1020 return rc;
1021 }
1022
1023 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1024 {
1025 long sv = 0, mask;
1026
1027 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1028 mask = ~(ctxt->dst.bytes * 8 - 1);
1029
1030 if (ctxt->src.bytes == 2)
1031 sv = (s16)ctxt->src.val & (s16)mask;
1032 else if (ctxt->src.bytes == 4)
1033 sv = (s32)ctxt->src.val & (s32)mask;
1034
1035 ctxt->dst.addr.mem.ea += (sv >> 3);
1036 }
1037
1038 /* only subword offset */
1039 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1040 }
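/*
 * Example: "bt" with a 16-bit register source holding 20 and a memory
 * destination: mask is ~15, so sv == 16, the effective address
 * advances by 16 >> 3 == 2 bytes, and the remaining in-word bit offset
 * is 20 & 15 == 4.
 */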
1041
1042 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1043 unsigned long addr, void *dest, unsigned size)
1044 {
1045 int rc;
1046 struct read_cache *mc = &ctxt->mem_read;
1047
1048 while (size) {
1049 int n = min(size, 8u);
1050 size -= n;
1051 if (mc->pos < mc->end)
1052 goto read_cached;
1053
1054 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
1055 &ctxt->exception);
1056 if (rc != X86EMUL_CONTINUE)
1057 return rc;
1058 mc->end += n;
1059
1060 read_cached:
1061 memcpy(dest, mc->data + mc->pos, n);
1062 mc->pos += n;
1063 dest += n;
1064 addr += n;
1065 }
1066 return X86EMUL_CONTINUE;
1067 }
1068
1069 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1070 struct segmented_address addr,
1071 void *data,
1072 unsigned size)
1073 {
1074 int rc;
1075 ulong linear;
1076
1077 rc = linearize(ctxt, addr, size, false, &linear);
1078 if (rc != X86EMUL_CONTINUE)
1079 return rc;
1080 return read_emulated(ctxt, linear, data, size);
1081 }
1082
1083 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1084 struct segmented_address addr,
1085 const void *data,
1086 unsigned size)
1087 {
1088 int rc;
1089 ulong linear;
1090
1091 rc = linearize(ctxt, addr, size, true, &linear);
1092 if (rc != X86EMUL_CONTINUE)
1093 return rc;
1094 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1095 &ctxt->exception);
1096 }
1097
1098 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1099 struct segmented_address addr,
1100 const void *orig_data, const void *data,
1101 unsigned size)
1102 {
1103 int rc;
1104 ulong linear;
1105
1106 rc = linearize(ctxt, addr, size, true, &linear);
1107 if (rc != X86EMUL_CONTINUE)
1108 return rc;
1109 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1110 size, &ctxt->exception);
1111 }
1112
1113 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1114 unsigned int size, unsigned short port,
1115 void *dest)
1116 {
1117 struct read_cache *rc = &ctxt->io_read;
1118
1119 if (rc->pos == rc->end) { /* refill pio read ahead */
1120 unsigned int in_page, n;
1121 unsigned int count = ctxt->rep_prefix ?
1122 address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
1123 in_page = (ctxt->eflags & EFLG_DF) ?
1124 offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
1125 PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
1126 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1127 count);
1128 if (n == 0)
1129 n = 1;
1130 rc->pos = rc->end = 0;
1131 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1132 return 0;
1133 rc->end = n * size;
1134 }
1135
1136 memcpy(dest, rc->data + rc->pos, size);
1137 rc->pos += size;
1138 return 1;
1139 }
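/*
 * Example: "rep insb" with RCX == 100 and DF clear refills the cache
 * with n == min(bytes left in the page, sizeof(rc->data), 100) port
 * reads in one ->pio_in_emulated() call; subsequent iterations are
 * satisfied from rc->data without calling out again.
 */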
1140
1141 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1142 u16 selector, struct desc_ptr *dt)
1143 {
1144 struct x86_emulate_ops *ops = ctxt->ops;
1145
1146 if (selector & 1 << 2) {
1147 struct desc_struct desc;
1148 u16 sel;
1149
1150 memset (dt, 0, sizeof *dt);
1151 if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
1152 return;
1153
1154 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1155 dt->address = get_desc_base(&desc);
1156 } else
1157 ops->get_gdt(ctxt, dt);
1158 }
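/*
 * Example: selector 0x002c has bit 2 (TI) set, so the descriptor is in
 * the LDT; the index 0x002c >> 3 == 5 then picks its sixth 8-byte
 * entry.
 */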
1159
1160 /* allowed only for 8-byte segment descriptors */
1161 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1162 u16 selector, struct desc_struct *desc)
1163 {
1164 struct desc_ptr dt;
1165 u16 index = selector >> 3;
1166 ulong addr;
1167
1168 get_descriptor_table_ptr(ctxt, selector, &dt);
1169
1170 if (dt.size < index * 8 + 7)
1171 return emulate_gp(ctxt, selector & 0xfffc);
1172
1173 addr = dt.address + index * 8;
1174 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1175 &ctxt->exception);
1176 }
1177
1178 /* allowed only for 8-byte segment descriptors */
1179 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1180 u16 selector, struct desc_struct *desc)
1181 {
1182 struct desc_ptr dt;
1183 u16 index = selector >> 3;
1184 ulong addr;
1185
1186 get_descriptor_table_ptr(ctxt, selector, &dt);
1187
1188 if (dt.size < index * 8 + 7)
1189 return emulate_gp(ctxt, selector & 0xfffc);
1190
1191 addr = dt.address + index * 8;
1192 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1193 &ctxt->exception);
1194 }
1195
1196 /* Does not support long mode */
1197 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1198 u16 selector, int seg)
1199 {
1200 struct desc_struct seg_desc;
1201 u8 dpl, rpl, cpl;
1202 unsigned err_vec = GP_VECTOR;
1203 u32 err_code = 0;
1204 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1205 int ret;
1206
1207 memset(&seg_desc, 0, sizeof seg_desc);
1208
1209 if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1210 || ctxt->mode == X86EMUL_MODE_REAL) {
1211 /* set real mode segment descriptor */
1212 set_desc_base(&seg_desc, selector << 4);
1213 set_desc_limit(&seg_desc, 0xffff);
1214 seg_desc.type = 3;
1215 seg_desc.p = 1;
1216 seg_desc.s = 1;
1217 goto load;
1218 }
1219
1220 /* NULL selector is not valid for TR, CS and SS */
1221 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
1222 && null_selector)
1223 goto exception;
1224
1225 /* TR should be in GDT only */
1226 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1227 goto exception;
1228
1229 if (null_selector) /* for NULL selector skip all following checks */
1230 goto load;
1231
1232 ret = read_segment_descriptor(ctxt, selector, &seg_desc);
1233 if (ret != X86EMUL_CONTINUE)
1234 return ret;
1235
1236 err_code = selector & 0xfffc;
1237 err_vec = GP_VECTOR;
1238
1239 /* can't load system descriptor into segment selector */
1240 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1241 goto exception;
1242
1243 if (!seg_desc.p) {
1244 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1245 goto exception;
1246 }
1247
1248 rpl = selector & 3;
1249 dpl = seg_desc.dpl;
1250 cpl = ctxt->ops->cpl(ctxt);
1251
1252 switch (seg) {
1253 case VCPU_SREG_SS:
1254 /*
1255 * segment is not a writable data segment, or the segment
1256 * selector's RPL != CPL, or the segment's DPL != CPL
1257 */
1258 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1259 goto exception;
1260 break;
1261 case VCPU_SREG_CS:
1262 if (!(seg_desc.type & 8))
1263 goto exception;
1264
1265 if (seg_desc.type & 4) {
1266 /* conforming */
1267 if (dpl > cpl)
1268 goto exception;
1269 } else {
1270 /* nonconforming */
1271 if (rpl > cpl || dpl != cpl)
1272 goto exception;
1273 }
1274 /* CS(RPL) <- CPL */
1275 selector = (selector & 0xfffc) | cpl;
1276 break;
1277 case VCPU_SREG_TR:
1278 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1279 goto exception;
1280 break;
1281 case VCPU_SREG_LDTR:
1282 if (seg_desc.s || seg_desc.type != 2)
1283 goto exception;
1284 break;
1285 default: /* DS, ES, FS, or GS */
1286 /*
1287 * segment is not a data or readable code segment or
1288 * ((segment is a data or nonconforming code segment)
1289 * and (both RPL and CPL > DPL))
1290 */
1291 if ((seg_desc.type & 0xa) == 0x8 ||
1292 (((seg_desc.type & 0xc) != 0xc) &&
1293 (rpl > dpl && cpl > dpl)))
1294 goto exception;
1295 break;
1296 }
1297
1298 if (seg_desc.s) {
1299 /* mark segment as accessed */
1300 seg_desc.type |= 1;
1301 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1302 if (ret != X86EMUL_CONTINUE)
1303 return ret;
1304 }
1305 load:
1306 ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
1307 return X86EMUL_CONTINUE;
1308 exception:
1309 emulate_exception(ctxt, err_vec, err_code, true);
1310 return X86EMUL_PROPAGATE_FAULT;
1311 }
1312
1313 static void write_register_operand(struct operand *op)
1314 {
1315 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1316 switch (op->bytes) {
1317 case 1:
1318 *(u8 *)op->addr.reg = (u8)op->val;
1319 break;
1320 case 2:
1321 *(u16 *)op->addr.reg = (u16)op->val;
1322 break;
1323 case 4:
1324 *op->addr.reg = (u32)op->val;
1325 break; /* 64b: zero-extend */
1326 case 8:
1327 *op->addr.reg = op->val;
1328 break;
1329 }
1330 }
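/*
 * Example: a 4-byte write of 0x12345678 to RAX stores the value and
 * clears bits 63:32, matching hardware, where "mov $1, %eax"
 * zero-extends into the full 64-bit register.
 */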
1331
1332 static int writeback(struct x86_emulate_ctxt *ctxt)
1333 {
1334 int rc;
1335
1336 switch (ctxt->dst.type) {
1337 case OP_REG:
1338 write_register_operand(&ctxt->dst);
1339 break;
1340 case OP_MEM:
1341 if (ctxt->lock_prefix)
1342 rc = segmented_cmpxchg(ctxt,
1343 ctxt->dst.addr.mem,
1344 &ctxt->dst.orig_val,
1345 &ctxt->dst.val,
1346 ctxt->dst.bytes);
1347 else
1348 rc = segmented_write(ctxt,
1349 ctxt->dst.addr.mem,
1350 &ctxt->dst.val,
1351 ctxt->dst.bytes);
1352 if (rc != X86EMUL_CONTINUE)
1353 return rc;
1354 break;
1355 case OP_XMM:
1356 write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
1357 break;
1358 case OP_NONE:
1359 /* no writeback */
1360 break;
1361 default:
1362 break;
1363 }
1364 return X86EMUL_CONTINUE;
1365 }
1366
1367 static int em_push(struct x86_emulate_ctxt *ctxt)
1368 {
1369 struct segmented_address addr;
1370
1371 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
1372 addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1373 addr.seg = VCPU_SREG_SS;
1374
1375 /* Disable writeback. */
1376 ctxt->dst.type = OP_NONE;
1377 return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
1378 }
1379
1380 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1381 void *dest, int len)
1382 {
1383 int rc;
1384 struct segmented_address addr;
1385
1386 addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1387 addr.seg = VCPU_SREG_SS;
1388 rc = segmented_read(ctxt, addr, dest, len);
1389 if (rc != X86EMUL_CONTINUE)
1390 return rc;
1391
1392 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
1393 return rc;
1394 }
1395
1396 static int em_pop(struct x86_emulate_ctxt *ctxt)
1397 {
1398 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1399 }
1400
1401 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1402 void *dest, int len)
1403 {
1404 int rc;
1405 unsigned long val, change_mask;
1406 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1407 int cpl = ctxt->ops->cpl(ctxt);
1408
1409 rc = emulate_pop(ctxt, &val, len);
1410 if (rc != X86EMUL_CONTINUE)
1411 return rc;
1412
1413 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1414 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1415
1416 switch(ctxt->mode) {
1417 case X86EMUL_MODE_PROT64:
1418 case X86EMUL_MODE_PROT32:
1419 case X86EMUL_MODE_PROT16:
1420 if (cpl == 0)
1421 change_mask |= EFLG_IOPL;
1422 if (cpl <= iopl)
1423 change_mask |= EFLG_IF;
1424 break;
1425 case X86EMUL_MODE_VM86:
1426 if (iopl < 3)
1427 return emulate_gp(ctxt, 0);
1428 change_mask |= EFLG_IF;
1429 break;
1430 default: /* real mode */
1431 change_mask |= (EFLG_IOPL | EFLG_IF);
1432 break;
1433 }
1434
1435 *(unsigned long *)dest =
1436 (ctxt->eflags & ~change_mask) | (val & change_mask);
1437
1438 return rc;
1439 }
1440
1441 static int em_popf(struct x86_emulate_ctxt *ctxt)
1442 {
1443 ctxt->dst.type = OP_REG;
1444 ctxt->dst.addr.reg = &ctxt->eflags;
1445 ctxt->dst.bytes = ctxt->op_bytes;
1446 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1447 }
1448
1449 static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
1450 {
1451 ctxt->src.val = get_segment_selector(ctxt, seg);
1452
1453 return em_push(ctxt);
1454 }
1455
1456 static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, int seg)
1457 {
1458 unsigned long selector;
1459 int rc;
1460
1461 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1462 if (rc != X86EMUL_CONTINUE)
1463 return rc;
1464
1465 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1466 return rc;
1467 }
1468
1469 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1470 {
1471 unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
1472 int rc = X86EMUL_CONTINUE;
1473 int reg = VCPU_REGS_RAX;
1474
1475 while (reg <= VCPU_REGS_RDI) {
1476 ctxt->src.val = (reg == VCPU_REGS_RSP) ?
1477 old_esp : ctxt->regs[reg];
1478
1479 rc = em_push(ctxt);
1480 if (rc != X86EMUL_CONTINUE)
1481 return rc;
1482
1483 ++reg;
1484 }
1485
1486 return rc;
1487 }
1488
1489 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1490 {
1491 ctxt->src.val = (unsigned long)ctxt->eflags;
1492 return em_push(ctxt);
1493 }
1494
1495 static int em_popa(struct x86_emulate_ctxt *ctxt)
1496 {
1497 int rc = X86EMUL_CONTINUE;
1498 int reg = VCPU_REGS_RDI;
1499
1500 while (reg >= VCPU_REGS_RAX) {
1501 if (reg == VCPU_REGS_RSP) {
1502 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
1503 ctxt->op_bytes);
1504 --reg;
1505 }
1506
1507 rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
1508 if (rc != X86EMUL_CONTINUE)
1509 break;
1510 --reg;
1511 }
1512 return rc;
1513 }
1514
1515 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1516 {
1517 struct x86_emulate_ops *ops = ctxt->ops;
1518 int rc;
1519 struct desc_ptr dt;
1520 gva_t cs_addr;
1521 gva_t eip_addr;
1522 u16 cs, eip;
1523
1524 /* TODO: Add limit checks */
1525 ctxt->src.val = ctxt->eflags;
1526 rc = em_push(ctxt);
1527 if (rc != X86EMUL_CONTINUE)
1528 return rc;
1529
1530 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1531
1532 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1533 rc = em_push(ctxt);
1534 if (rc != X86EMUL_CONTINUE)
1535 return rc;
1536
1537 ctxt->src.val = ctxt->_eip;
1538 rc = em_push(ctxt);
1539 if (rc != X86EMUL_CONTINUE)
1540 return rc;
1541
1542 ops->get_idt(ctxt, &dt);
1543
1544 eip_addr = dt.address + (irq << 2);
1545 cs_addr = dt.address + (irq << 2) + 2;
1546
1547 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1548 if (rc != X86EMUL_CONTINUE)
1549 return rc;
1550
1551 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1552 if (rc != X86EMUL_CONTINUE)
1553 return rc;
1554
1555 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1556 if (rc != X86EMUL_CONTINUE)
1557 return rc;
1558
1559 ctxt->_eip = eip;
1560
1561 return rc;
1562 }
1563
1564 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1565 {
1566 switch(ctxt->mode) {
1567 case X86EMUL_MODE_REAL:
1568 return emulate_int_real(ctxt, irq);
1569 case X86EMUL_MODE_VM86:
1570 case X86EMUL_MODE_PROT16:
1571 case X86EMUL_MODE_PROT32:
1572 case X86EMUL_MODE_PROT64:
1573 default:
1574 /* Protected mode interrupts are not implemented yet */
1575 return X86EMUL_UNHANDLEABLE;
1576 }
1577 }
1578
1579 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1580 {
1581 int rc = X86EMUL_CONTINUE;
1582 unsigned long temp_eip = 0;
1583 unsigned long temp_eflags = 0;
1584 unsigned long cs = 0;
1585 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1586 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1587 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1588 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1589
1590 /* TODO: Add stack limit check */
1591
1592 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1593
1594 if (rc != X86EMUL_CONTINUE)
1595 return rc;
1596
1597 if (temp_eip & ~0xffff)
1598 return emulate_gp(ctxt, 0);
1599
1600 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1601
1602 if (rc != X86EMUL_CONTINUE)
1603 return rc;
1604
1605 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1606
1607 if (rc != X86EMUL_CONTINUE)
1608 return rc;
1609
1610 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1611
1612 if (rc != X86EMUL_CONTINUE)
1613 return rc;
1614
1615 ctxt->_eip = temp_eip;
1616
1617
1618 if (ctxt->op_bytes == 4)
1619 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1620 else if (ctxt->op_bytes == 2) {
1621 ctxt->eflags &= ~0xffff;
1622 ctxt->eflags |= temp_eflags;
1623 }
1624
1625 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1626 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1627
1628 return rc;
1629 }
1630
1631 static int em_iret(struct x86_emulate_ctxt *ctxt)
1632 {
1633 switch(ctxt->mode) {
1634 case X86EMUL_MODE_REAL:
1635 return emulate_iret_real(ctxt);
1636 case X86EMUL_MODE_VM86:
1637 case X86EMUL_MODE_PROT16:
1638 case X86EMUL_MODE_PROT32:
1639 case X86EMUL_MODE_PROT64:
1640 default:
1641 /* iret from protected mode is not implemented yet */
1642 return X86EMUL_UNHANDLEABLE;
1643 }
1644 }
1645
1646 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1647 {
1648 int rc;
1649 unsigned short sel;
1650
1651 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1652
1653 rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
1654 if (rc != X86EMUL_CONTINUE)
1655 return rc;
1656
1657 ctxt->_eip = 0;
1658 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
1659 return X86EMUL_CONTINUE;
1660 }
1661
1662 static int em_grp1a(struct x86_emulate_ctxt *ctxt)
1663 {
1664 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
1665 }
1666
1667 static int em_grp2(struct x86_emulate_ctxt *ctxt)
1668 {
1669 switch (ctxt->modrm_reg) {
1670 case 0: /* rol */
1671 emulate_2op_SrcB(ctxt, "rol");
1672 break;
1673 case 1: /* ror */
1674 emulate_2op_SrcB(ctxt, "ror");
1675 break;
1676 case 2: /* rcl */
1677 emulate_2op_SrcB(ctxt, "rcl");
1678 break;
1679 case 3: /* rcr */
1680 emulate_2op_SrcB(ctxt, "rcr");
1681 break;
1682 case 4: /* sal/shl */
1683 case 6: /* sal/shl */
1684 emulate_2op_SrcB(ctxt, "sal");
1685 break;
1686 case 5: /* shr */
1687 emulate_2op_SrcB(ctxt, "shr");
1688 break;
1689 case 7: /* sar */
1690 emulate_2op_SrcB(ctxt, "sar");
1691 break;
1692 }
1693 return X86EMUL_CONTINUE;
1694 }
1695
1696 static int em_not(struct x86_emulate_ctxt *ctxt)
1697 {
1698 ctxt->dst.val = ~ctxt->dst.val;
1699 return X86EMUL_CONTINUE;
1700 }
1701
1702 static int em_neg(struct x86_emulate_ctxt *ctxt)
1703 {
1704 emulate_1op(ctxt, "neg");
1705 return X86EMUL_CONTINUE;
1706 }
1707
1708 static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
1709 {
1710 u8 ex = 0;
1711
1712 emulate_1op_rax_rdx(ctxt, "mul", ex);
1713 return X86EMUL_CONTINUE;
1714 }
1715
1716 static int em_imul_ex(struct x86_emulate_ctxt *ctxt)
1717 {
1718 u8 ex = 0;
1719
1720 emulate_1op_rax_rdx(ctxt, "imul", ex);
1721 return X86EMUL_CONTINUE;
1722 }
1723
1724 static int em_div_ex(struct x86_emulate_ctxt *ctxt)
1725 {
1726 u8 de = 0;
1727
1728 emulate_1op_rax_rdx(ctxt, "div", de);
1729 if (de)
1730 return emulate_de(ctxt);
1731 return X86EMUL_CONTINUE;
1732 }
1733
1734 static int em_idiv_ex(struct x86_emulate_ctxt *ctxt)
1735 {
1736 u8 de = 0;
1737
1738 emulate_1op_rax_rdx(ctxt, "idiv", de);
1739 if (de)
1740 return emulate_de(ctxt);
1741 return X86EMUL_CONTINUE;
1742 }
1743
1744 static int em_grp45(struct x86_emulate_ctxt *ctxt)
1745 {
1746 int rc = X86EMUL_CONTINUE;
1747
1748 switch (ctxt->modrm_reg) {
1749 case 0: /* inc */
1750 emulate_1op(ctxt, "inc");
1751 break;
1752 case 1: /* dec */
1753 emulate_1op(ctxt, "dec");
1754 break;
1755 case 2: /* call near abs */ {
1756 long int old_eip;
1757 old_eip = ctxt->_eip;
1758 ctxt->_eip = ctxt->src.val;
1759 ctxt->src.val = old_eip;
1760 rc = em_push(ctxt);
1761 break;
1762 }
1763 case 4: /* jmp abs */
1764 ctxt->_eip = ctxt->src.val;
1765 break;
1766 case 5: /* jmp far */
1767 rc = em_jmp_far(ctxt);
1768 break;
1769 case 6: /* push */
1770 rc = em_push(ctxt);
1771 break;
1772 }
1773 return rc;
1774 }
1775
1776 static int em_grp9(struct x86_emulate_ctxt *ctxt)
1777 {
1778 u64 old = ctxt->dst.orig_val64;
1779
1780 if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
1781 ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
1782 ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1783 ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1784 ctxt->eflags &= ~EFLG_ZF;
1785 } else {
1786 ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
1787 (u32) ctxt->regs[VCPU_REGS_RBX];
1788
1789 ctxt->eflags |= EFLG_ZF;
1790 }
1791 return X86EMUL_CONTINUE;
1792 }
1793
1794 static int em_ret(struct x86_emulate_ctxt *ctxt)
1795 {
1796 ctxt->dst.type = OP_REG;
1797 ctxt->dst.addr.reg = &ctxt->_eip;
1798 ctxt->dst.bytes = ctxt->op_bytes;
1799 return em_pop(ctxt);
1800 }
1801
1802 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
1803 {
1804 int rc;
1805 unsigned long cs;
1806
1807 rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
1808 if (rc != X86EMUL_CONTINUE)
1809 return rc;
1810 if (ctxt->op_bytes == 4)
1811 ctxt->_eip = (u32)ctxt->_eip;
1812 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1813 if (rc != X86EMUL_CONTINUE)
1814 return rc;
1815 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1816 return rc;
1817 }
1818
1819 static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, int seg)
1820 {
1821 unsigned short sel;
1822 int rc;
1823
1824 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1825
1826 rc = load_segment_descriptor(ctxt, sel, seg);
1827 if (rc != X86EMUL_CONTINUE)
1828 return rc;
1829
1830 ctxt->dst.val = ctxt->src.val;
1831 return rc;
1832 }
1833
1834 static void
1835 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1836 struct desc_struct *cs, struct desc_struct *ss)
1837 {
1838 u16 selector;
1839
1840 memset(cs, 0, sizeof(struct desc_struct));
1841 ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
1842 memset(ss, 0, sizeof(struct desc_struct));
1843
1844 cs->l = 0; /* will be adjusted later */
1845 set_desc_base(cs, 0); /* flat segment */
1846 cs->g = 1; /* 4kb granularity */
1847 set_desc_limit(cs, 0xfffff); /* 4GB limit */
1848 cs->type = 0x0b; /* Read, Execute, Accessed */
1849 cs->s = 1;
1850 cs->dpl = 0; /* will be adjusted later */
1851 cs->p = 1;
1852 cs->d = 1;
1853
1854 set_desc_base(ss, 0); /* flat segment */
1855 set_desc_limit(ss, 0xfffff); /* 4GB limit */
1856 ss->g = 1; /* 4kb granularity */
1857 ss->s = 1;
1858 ss->type = 0x03; /* Read/Write, Accessed */
1859 ss->d = 1; /* 32bit stack segment */
1860 ss->dpl = 0;
1861 ss->p = 1;
1862 }
1863
1864 static int em_syscall(struct x86_emulate_ctxt *ctxt)
1865 {
1866 struct x86_emulate_ops *ops = ctxt->ops;
1867 struct desc_struct cs, ss;
1868 u64 msr_data;
1869 u16 cs_sel, ss_sel;
1870 u64 efer = 0;
1871
1872 /* syscall is not available in real mode */
1873 if (ctxt->mode == X86EMUL_MODE_REAL ||
1874 ctxt->mode == X86EMUL_MODE_VM86)
1875 return emulate_ud(ctxt);
1876
1877 ops->get_msr(ctxt, MSR_EFER, &efer);
1878 setup_syscalls_segments(ctxt, &cs, &ss);
1879
1880 ops->get_msr(ctxt, MSR_STAR, &msr_data);
1881 msr_data >>= 32;
1882 cs_sel = (u16)(msr_data & 0xfffc);
1883 ss_sel = (u16)(msr_data + 8);
1884
1885 if (efer & EFER_LMA) {
1886 cs.d = 0;
1887 cs.l = 1;
1888 }
1889 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
1890 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
1891
1892 ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
1893 if (efer & EFER_LMA) {
1894 #ifdef CONFIG_X86_64
1895 ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
1896
1897 ops->get_msr(ctxt,
1898 ctxt->mode == X86EMUL_MODE_PROT64 ?
1899 MSR_LSTAR : MSR_CSTAR, &msr_data);
1900 ctxt->_eip = msr_data;
1901
1902 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
1903 ctxt->eflags &= ~(msr_data | EFLG_RF);
1904 #endif
1905 } else {
1906 /* legacy mode */
1907 ops->get_msr(ctxt, MSR_STAR, &msr_data);
1908 ctxt->_eip = (u32)msr_data;
1909
1910 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1911 }
1912
1913 return X86EMUL_CONTINUE;
1914 }
1915
1916 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
1917 {
1918 struct x86_emulate_ops *ops = ctxt->ops;
1919 struct desc_struct cs, ss;
1920 u64 msr_data;
1921 u16 cs_sel, ss_sel;
1922 u64 efer = 0;
1923
1924 ops->get_msr(ctxt, MSR_EFER, &efer);
1925 /* inject #GP if in real mode */
1926 if (ctxt->mode == X86EMUL_MODE_REAL)
1927 return emulate_gp(ctxt, 0);
1928
1929 /* XXX sysenter/sysexit have not been tested in 64bit mode.
1930 * Therefore, we inject an #UD.
1931 */
1932 if (ctxt->mode == X86EMUL_MODE_PROT64)
1933 return emulate_ud(ctxt);
1934
1935 setup_syscalls_segments(ctxt, &cs, &ss);
1936
1937 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
1938 switch (ctxt->mode) {
1939 case X86EMUL_MODE_PROT32:
1940 if ((msr_data & 0xfffc) == 0x0)
1941 return emulate_gp(ctxt, 0);
1942 break;
1943 case X86EMUL_MODE_PROT64:
1944 if (msr_data == 0x0)
1945 return emulate_gp(ctxt, 0);
1946 break;
1947 }
1948
1949 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1950 cs_sel = (u16)msr_data;
1951 cs_sel &= ~SELECTOR_RPL_MASK;
1952 ss_sel = cs_sel + 8;
1953 ss_sel &= ~SELECTOR_RPL_MASK;
1954 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
1955 cs.d = 0;
1956 cs.l = 1;
1957 }
1958
1959 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
1960 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
1961
1962 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
1963 ctxt->_eip = msr_data;
1964
1965 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
1966 ctxt->regs[VCPU_REGS_RSP] = msr_data;
1967
1968 return X86EMUL_CONTINUE;
1969 }
1970
1971 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
1972 {
1973 struct x86_emulate_ops *ops = ctxt->ops;
1974 struct desc_struct cs, ss;
1975 u64 msr_data;
1976 int usermode;
1977 u16 cs_sel = 0, ss_sel = 0;
1978
1979 /* inject #GP if in real mode or Virtual 8086 mode */
1980 if (ctxt->mode == X86EMUL_MODE_REAL ||
1981 ctxt->mode == X86EMUL_MODE_VM86)
1982 return emulate_gp(ctxt, 0);
1983
1984 setup_syscalls_segments(ctxt, &cs, &ss);
1985
1986 if ((ctxt->rex_prefix & 0x8) != 0x0)
1987 usermode = X86EMUL_MODE_PROT64;
1988 else
1989 usermode = X86EMUL_MODE_PROT32;
1990
1991 cs.dpl = 3;
1992 ss.dpl = 3;
1993 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
1994 switch (usermode) {
1995 case X86EMUL_MODE_PROT32:
1996 cs_sel = (u16)(msr_data + 16);
1997 if ((msr_data & 0xfffc) == 0x0)
1998 return emulate_gp(ctxt, 0);
1999 ss_sel = (u16)(msr_data + 24);
2000 break;
2001 case X86EMUL_MODE_PROT64:
2002 cs_sel = (u16)(msr_data + 32);
2003 if (msr_data == 0x0)
2004 return emulate_gp(ctxt, 0);
2005 ss_sel = cs_sel + 8;
2006 cs.d = 0;
2007 cs.l = 1;
2008 break;
2009 }
2010 cs_sel |= SELECTOR_RPL_MASK;
2011 ss_sel |= SELECTOR_RPL_MASK;
2012
2013 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2014 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2015
2016 ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
2017 ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
2018
2019 return X86EMUL_CONTINUE;
2020 }
2021
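/*
 * Returns true when I/O must be checked against the TSS I/O permission
 * bitmap: always in virtual-8086 mode, and in protected mode whenever
 * CPL exceeds EFLAGS.IOPL. Real mode performs no I/O permission checks.
 */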
2022 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2023 {
2024 int iopl;
2025 if (ctxt->mode == X86EMUL_MODE_REAL)
2026 return false;
2027 if (ctxt->mode == X86EMUL_MODE_VM86)
2028 return true;
2029 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2030 return ctxt->ops->cpl(ctxt) > iopl;
2031 }
2032
2033 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2034 u16 port, u16 len)
2035 {
2036 struct x86_emulate_ops *ops = ctxt->ops;
2037 struct desc_struct tr_seg;
2038 u32 base3;
2039 int r;
2040 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2041 unsigned mask = (1 << len) - 1;
2042 unsigned long base;
2043
2044 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2045 if (!tr_seg.p)
2046 return false;
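/*
 * The 16-bit I/O map base field lives at offset 102 in the TSS, so the
 * TSS limit must cover at least bytes 102..103.
 */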
2047 if (desc_limit_scaled(&tr_seg) < 103)
2048 return false;
2049 base = get_desc_base(&tr_seg);
2050 #ifdef CONFIG_X86_64
2051 base |= ((u64)base3) << 32;
2052 #endif
2053 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2054 if (r != X86EMUL_CONTINUE)
2055 return false;
2056 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2057 return false;
2058 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2059 if (r != X86EMUL_CONTINUE)
2060 return false;
2061 if ((perm >> bit_idx) & mask)
2062 return false;
2063 return true;
2064 }
2065
2066 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2067 u16 port, u16 len)
2068 {
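/*
 * Cache a successful check so a REP-prefixed string I/O instruction
 * does not re-read the TSS bitmap on every iteration.
 */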
2069 if (ctxt->perm_ok)
2070 return true;
2071
2072 if (emulator_bad_iopl(ctxt))
2073 if (!emulator_io_port_access_allowed(ctxt, port, len))
2074 return false;
2075
2076 ctxt->perm_ok = true;
2077
2078 return true;
2079 }
2080
2081 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2082 struct tss_segment_16 *tss)
2083 {
2084 tss->ip = ctxt->_eip;
2085 tss->flag = ctxt->eflags;
2086 tss->ax = ctxt->regs[VCPU_REGS_RAX];
2087 tss->cx = ctxt->regs[VCPU_REGS_RCX];
2088 tss->dx = ctxt->regs[VCPU_REGS_RDX];
2089 tss->bx = ctxt->regs[VCPU_REGS_RBX];
2090 tss->sp = ctxt->regs[VCPU_REGS_RSP];
2091 tss->bp = ctxt->regs[VCPU_REGS_RBP];
2092 tss->si = ctxt->regs[VCPU_REGS_RSI];
2093 tss->di = ctxt->regs[VCPU_REGS_RDI];
2094
2095 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2096 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2097 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2098 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2099 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2100 }
2101
2102 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2103 struct tss_segment_16 *tss)
2104 {
2105 int ret;
2106
2107 ctxt->_eip = tss->ip;
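/* Bit 1 of EFLAGS is reserved and always reads as 1. */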
2108 ctxt->eflags = tss->flag | 2;
2109 ctxt->regs[VCPU_REGS_RAX] = tss->ax;
2110 ctxt->regs[VCPU_REGS_RCX] = tss->cx;
2111 ctxt->regs[VCPU_REGS_RDX] = tss->dx;
2112 ctxt->regs[VCPU_REGS_RBX] = tss->bx;
2113 ctxt->regs[VCPU_REGS_RSP] = tss->sp;
2114 ctxt->regs[VCPU_REGS_RBP] = tss->bp;
2115 ctxt->regs[VCPU_REGS_RSI] = tss->si;
2116 ctxt->regs[VCPU_REGS_RDI] = tss->di;
2117
2118 /*
2119 * The SDM says that segment selectors are loaded before segment
2120 * descriptors.
2121 */
2122 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2123 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2124 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2125 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2126 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2127
2128 /*
2129 * Now load segment descriptors. If a fault happens at this stage,
2130 * it is handled in the context of the new task.
2131 */
2132 ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
2133 if (ret != X86EMUL_CONTINUE)
2134 return ret;
2135 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2136 if (ret != X86EMUL_CONTINUE)
2137 return ret;
2138 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2139 if (ret != X86EMUL_CONTINUE)
2140 return ret;
2141 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2142 if (ret != X86EMUL_CONTINUE)
2143 return ret;
2144 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2145 if (ret != X86EMUL_CONTINUE)
2146 return ret;
2147
2148 return X86EMUL_CONTINUE;
2149 }
2150
2151 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2152 u16 tss_selector, u16 old_tss_sel,
2153 ulong old_tss_base, struct desc_struct *new_desc)
2154 {
2155 struct x86_emulate_ops *ops = ctxt->ops;
2156 struct tss_segment_16 tss_seg;
2157 int ret;
2158 u32 new_tss_base = get_desc_base(new_desc);
2159
2160 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2161 &ctxt->exception);
2162 if (ret != X86EMUL_CONTINUE)
2163 /* FIXME: need to provide precise fault address */
2164 return ret;
2165
2166 save_state_to_tss16(ctxt, &tss_seg);
2167
2168 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2169 &ctxt->exception);
2170 if (ret != X86EMUL_CONTINUE)
2171 /* FIXME: need to provide precise fault address */
2172 return ret;
2173
2174 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2175 &ctxt->exception);
2176 if (ret != X86EMUL_CONTINUE)
2177 /* FIXME: need to provide precise fault address */
2178 return ret;
2179
2180 if (old_tss_sel != 0xffff) {
2181 tss_seg.prev_task_link = old_tss_sel;
2182
2183 ret = ops->write_std(ctxt, new_tss_base,
2184 &tss_seg.prev_task_link,
2185 sizeof tss_seg.prev_task_link,
2186 &ctxt->exception);
2187 if (ret != X86EMUL_CONTINUE)
2188 /* FIXME: need to provide precise fault address */
2189 return ret;
2190 }
2191
2192 return load_state_from_tss16(ctxt, &tss_seg);
2193 }
2194
2195 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2196 struct tss_segment_32 *tss)
2197 {
2198 tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
2199 tss->eip = ctxt->_eip;
2200 tss->eflags = ctxt->eflags;
2201 tss->eax = ctxt->regs[VCPU_REGS_RAX];
2202 tss->ecx = ctxt->regs[VCPU_REGS_RCX];
2203 tss->edx = ctxt->regs[VCPU_REGS_RDX];
2204 tss->ebx = ctxt->regs[VCPU_REGS_RBX];
2205 tss->esp = ctxt->regs[VCPU_REGS_RSP];
2206 tss->ebp = ctxt->regs[VCPU_REGS_RBP];
2207 tss->esi = ctxt->regs[VCPU_REGS_RSI];
2208 tss->edi = ctxt->regs[VCPU_REGS_RDI];
2209
2210 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2211 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2212 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2213 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2214 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2215 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2216 tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2217 }
2218
2219 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2220 struct tss_segment_32 *tss)
2221 {
2222 int ret;
2223
2224 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2225 return emulate_gp(ctxt, 0);
2226 ctxt->_eip = tss->eip;
2227 ctxt->eflags = tss->eflags | 2;
2228 ctxt->regs[VCPU_REGS_RAX] = tss->eax;
2229 ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
2230 ctxt->regs[VCPU_REGS_RDX] = tss->edx;
2231 ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
2232 ctxt->regs[VCPU_REGS_RSP] = tss->esp;
2233 ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
2234 ctxt->regs[VCPU_REGS_RSI] = tss->esi;
2235 ctxt->regs[VCPU_REGS_RDI] = tss->edi;
2236
2237 /*
2238 * The SDM says that segment selectors are loaded before segment
2239 * descriptors.
2240 */
2241 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2242 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2243 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2244 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2245 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2246 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2247 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2248
2249 /*
2250 * Now load segment descriptors. If a fault happens at this stage,
2251 * it is handled in the context of the new task.
2252 */
2253 ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2254 if (ret != X86EMUL_CONTINUE)
2255 return ret;
2256 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2257 if (ret != X86EMUL_CONTINUE)
2258 return ret;
2259 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2260 if (ret != X86EMUL_CONTINUE)
2261 return ret;
2262 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2263 if (ret != X86EMUL_CONTINUE)
2264 return ret;
2265 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2266 if (ret != X86EMUL_CONTINUE)
2267 return ret;
2268 ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
2269 if (ret != X86EMUL_CONTINUE)
2270 return ret;
2271 ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
2272 if (ret != X86EMUL_CONTINUE)
2273 return ret;
2274
2275 return X86EMUL_CONTINUE;
2276 }
2277
2278 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2279 u16 tss_selector, u16 old_tss_sel,
2280 ulong old_tss_base, struct desc_struct *new_desc)
2281 {
2282 struct x86_emulate_ops *ops = ctxt->ops;
2283 struct tss_segment_32 tss_seg;
2284 int ret;
2285 u32 new_tss_base = get_desc_base(new_desc);
2286
2287 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2288 &ctxt->exception);
2289 if (ret != X86EMUL_CONTINUE)
2290 /* FIXME: need to provide precise fault address */
2291 return ret;
2292
2293 save_state_to_tss32(ctxt, &tss_seg);
2294
2295 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2296 &ctxt->exception);
2297 if (ret != X86EMUL_CONTINUE)
2298 /* FIXME: need to provide precise fault address */
2299 return ret;
2300
2301 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2302 &ctxt->exception);
2303 if (ret != X86EMUL_CONTINUE)
2304 /* FIXME: need to provide precise fault address */
2305 return ret;
2306
2307 if (old_tss_sel != 0xffff) {
2308 tss_seg.prev_task_link = old_tss_sel;
2309
2310 ret = ops->write_std(ctxt, new_tss_base,
2311 &tss_seg.prev_task_link,
2312 sizeof tss_seg.prev_task_link,
2313 &ctxt->exception);
2314 if (ret != X86EMUL_CONTINUE)
2315 /* FIXME: need to provide precise fault address */
2316 return ret;
2317 }
2318
2319 return load_state_from_tss32(ctxt, &tss_seg);
2320 }
2321
2322 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2323 u16 tss_selector, int reason,
2324 bool has_error_code, u32 error_code)
2325 {
2326 struct x86_emulate_ops *ops = ctxt->ops;
2327 struct desc_struct curr_tss_desc, next_tss_desc;
2328 int ret;
2329 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2330 ulong old_tss_base =
2331 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2332 u32 desc_limit;
2333
2334 /* FIXME: old_tss_base == ~0 ? */
2335
2336 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2337 if (ret != X86EMUL_CONTINUE)
2338 return ret;
2339 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2340 if (ret != X86EMUL_CONTINUE)
2341 return ret;
2342
2343 /* FIXME: check that next_tss_desc is tss */
2344
2345 if (reason != TASK_SWITCH_IRET) {
2346 if ((tss_selector & 3) > next_tss_desc.dpl ||
2347 ops->cpl(ctxt) > next_tss_desc.dpl)
2348 return emulate_gp(ctxt, 0);
2349 }
2350
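/*
 * Descriptor type bit 3 distinguishes a 32-bit TSS, which must be at
 * least 0x67 bytes long, from a 16-bit TSS, which must be at least
 * 0x2b bytes long.
 */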
2351 desc_limit = desc_limit_scaled(&next_tss_desc);
2352 if (!next_tss_desc.p ||
2353 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2354 desc_limit < 0x2b)) {
2355 emulate_ts(ctxt, tss_selector & 0xfffc);
2356 return X86EMUL_PROPAGATE_FAULT;
2357 }
2358
2359 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2360 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2361 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2362 }
2363
2364 if (reason == TASK_SWITCH_IRET)
2365 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2366
2367 /* Set the back link to the previous task only if the NT bit is set
2368 in eflags; note that old_tss_sel is not used after this point. */
2369 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2370 old_tss_sel = 0xffff;
2371
2372 if (next_tss_desc.type & 8)
2373 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2374 old_tss_base, &next_tss_desc);
2375 else
2376 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2377 old_tss_base, &next_tss_desc);
2378 if (ret != X86EMUL_CONTINUE)
2379 return ret;
2380
2381 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2382 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2383
2384 if (reason != TASK_SWITCH_IRET) {
2385 next_tss_desc.type |= (1 << 1); /* set busy flag */
2386 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2387 }
2388
2389 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2390 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2391
2392 if (has_error_code) {
2393 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2394 ctxt->lock_prefix = 0;
2395 ctxt->src.val = (unsigned long) error_code;
2396 ret = em_push(ctxt);
2397 }
2398
2399 return ret;
2400 }
2401
2402 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2403 u16 tss_selector, int reason,
2404 bool has_error_code, u32 error_code)
2405 {
2406 int rc;
2407
2408 ctxt->_eip = ctxt->eip;
2409 ctxt->dst.type = OP_NONE;
2410
2411 rc = emulator_do_task_switch(ctxt, tss_selector, reason,
2412 has_error_code, error_code);
2413
2414 if (rc == X86EMUL_CONTINUE)
2415 ctxt->eip = ctxt->_eip;
2416
2417 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2418 }
2419
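/*
 * Advance rSI/rDI after one string-instruction iteration: EFLAGS.DF
 * selects the direction, and the updated address is recorded for the
 * next iteration's memory access.
 */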
2420 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2421 int reg, struct operand *op)
2422 {
2423 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2424
2425 register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
2426 op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
2427 op->addr.mem.seg = seg;
2428 }
2429
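/*
 * DAS: decimal adjust AL after subtraction. Subtract 6 if the low
 * nibble overflowed (or AF was set), 0x60 if the high nibble did,
 * updating AF and CF as the SDM specifies.
 */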
2430 static int em_das(struct x86_emulate_ctxt *ctxt)
2431 {
2432 u8 al, old_al;
2433 bool af, cf, old_cf;
2434
2435 cf = ctxt->eflags & X86_EFLAGS_CF;
2436 al = ctxt->dst.val;
2437
2438 old_al = al;
2439 old_cf = cf;
2440 cf = false;
2441 af = ctxt->eflags & X86_EFLAGS_AF;
2442 if ((al & 0x0f) > 9 || af) {
2443 al -= 6;
2444 cf = old_cf | (al >= 250);
2445 af = true;
2446 } else {
2447 af = false;
2448 }
2449 if (old_al > 0x99 || old_cf) {
2450 al -= 0x60;
2451 cf = true;
2452 }
2453
2454 ctxt->dst.val = al;
2455 /* Set PF, ZF, SF */
2456 ctxt->src.type = OP_IMM;
2457 ctxt->src.val = 0;
2458 ctxt->src.bytes = 1;
2459 emulate_2op_SrcV(ctxt, "or");
2460 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2461 if (cf)
2462 ctxt->eflags |= X86_EFLAGS_CF;
2463 if (af)
2464 ctxt->eflags |= X86_EFLAGS_AF;
2465 return X86EMUL_CONTINUE;
2466 }
2467
2468 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2469 {
2470 u16 sel, old_cs;
2471 ulong old_eip;
2472 int rc;
2473
2474 old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2475 old_eip = ctxt->_eip;
2476
2477 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2478 rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
2479 if (rc != X86EMUL_CONTINUE)
2480 return rc;
2481 ctxt->_eip = 0;
2482 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
2483
2484 ctxt->src.val = old_cs;
2485 rc = em_push(ctxt);
2486 if (rc != X86EMUL_CONTINUE)
2487 return rc;
2488
2489 ctxt->src.val = old_eip;
2490 return em_push(ctxt);
2491 }
2492
2493 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2494 {
2495 int rc;
2496
2497 ctxt->dst.type = OP_REG;
2498 ctxt->dst.addr.reg = &ctxt->_eip;
2499 ctxt->dst.bytes = ctxt->op_bytes;
2500 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
2501 if (rc != X86EMUL_CONTINUE)
2502 return rc;
2503 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
2504 return X86EMUL_CONTINUE;
2505 }
2506
2507 static int em_add(struct x86_emulate_ctxt *ctxt)
2508 {
2509 emulate_2op_SrcV(ctxt, "add");
2510 return X86EMUL_CONTINUE;
2511 }
2512
2513 static int em_or(struct x86_emulate_ctxt *ctxt)
2514 {
2515 emulate_2op_SrcV(ctxt, "or");
2516 return X86EMUL_CONTINUE;
2517 }
2518
2519 static int em_adc(struct x86_emulate_ctxt *ctxt)
2520 {
2521 emulate_2op_SrcV(ctxt, "adc");
2522 return X86EMUL_CONTINUE;
2523 }
2524
2525 static int em_sbb(struct x86_emulate_ctxt *ctxt)
2526 {
2527 emulate_2op_SrcV(ctxt, "sbb");
2528 return X86EMUL_CONTINUE;
2529 }
2530
2531 static int em_and(struct x86_emulate_ctxt *ctxt)
2532 {
2533 emulate_2op_SrcV(ctxt, "and");
2534 return X86EMUL_CONTINUE;
2535 }
2536
2537 static int em_sub(struct x86_emulate_ctxt *ctxt)
2538 {
2539 emulate_2op_SrcV(ctxt, "sub");
2540 return X86EMUL_CONTINUE;
2541 }
2542
2543 static int em_xor(struct x86_emulate_ctxt *ctxt)
2544 {
2545 emulate_2op_SrcV(ctxt, "xor");
2546 return X86EMUL_CONTINUE;
2547 }
2548
2549 static int em_cmp(struct x86_emulate_ctxt *ctxt)
2550 {
2551 emulate_2op_SrcV(ctxt, "cmp");
2552 /* Disable writeback. */
2553 ctxt->dst.type = OP_NONE;
2554 return X86EMUL_CONTINUE;
2555 }
2556
2557 static int em_test(struct x86_emulate_ctxt *ctxt)
2558 {
2559 emulate_2op_SrcV(ctxt, "test");
2560 /* Disable writeback. */
2561 ctxt->dst.type = OP_NONE;
2562 return X86EMUL_CONTINUE;
2563 }
2564
2565 static int em_xchg(struct x86_emulate_ctxt *ctxt)
2566 {
2567 /* Write back the register source. */
2568 ctxt->src.val = ctxt->dst.val;
2569 write_register_operand(&ctxt->src);
2570
2571 /* Write back the memory destination with implicit LOCK prefix. */
2572 ctxt->dst.val = ctxt->src.orig_val;
2573 ctxt->lock_prefix = 1;
2574 return X86EMUL_CONTINUE;
2575 }
2576
2577 static int em_imul(struct x86_emulate_ctxt *ctxt)
2578 {
2579 emulate_2op_SrcV_nobyte(ctxt, "imul");
2580 return X86EMUL_CONTINUE;
2581 }
2582
2583 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2584 {
2585 ctxt->dst.val = ctxt->src2.val;
2586 return em_imul(ctxt);
2587 }
2588
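/*
 * CWD/CDQ/CQO: replicate the sign bit of rAX throughout rDX:
 * sign bit 1 gives ~(1 - 1) = all ones, sign bit 0 gives ~(0 - 1) = 0.
 */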
2589 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2590 {
2591 ctxt->dst.type = OP_REG;
2592 ctxt->dst.bytes = ctxt->src.bytes;
2593 ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
2594 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2595
2596 return X86EMUL_CONTINUE;
2597 }
2598
2599 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2600 {
2601 u64 tsc = 0;
2602
2603 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2604 ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
2605 ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
2606 return X86EMUL_CONTINUE;
2607 }
2608
2609 static int em_mov(struct x86_emulate_ctxt *ctxt)
2610 {
2611 ctxt->dst.val = ctxt->src.val;
2612 return X86EMUL_CONTINUE;
2613 }
2614
2615 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
2616 {
2617 if (ctxt->modrm_reg > VCPU_SREG_GS)
2618 return emulate_ud(ctxt);
2619
2620 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
2621 return X86EMUL_CONTINUE;
2622 }
2623
2624 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
2625 {
2626 u16 sel = ctxt->src.val;
2627
2628 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
2629 return emulate_ud(ctxt);
2630
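/* A MOV to SS inhibits interrupts until after the next instruction. */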
2631 if (ctxt->modrm_reg == VCPU_SREG_SS)
2632 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2633
2634 /* Disable writeback. */
2635 ctxt->dst.type = OP_NONE;
2636 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
2637 }
2638
2639 static int em_movdqu(struct x86_emulate_ctxt *ctxt)
2640 {
2641 memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
2642 return X86EMUL_CONTINUE;
2643 }
2644
2645 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
2646 {
2647 int rc;
2648 ulong linear;
2649
2650 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
2651 if (rc == X86EMUL_CONTINUE)
2652 ctxt->ops->invlpg(ctxt, linear);
2653 /* Disable writeback. */
2654 ctxt->dst.type = OP_NONE;
2655 return X86EMUL_CONTINUE;
2656 }
2657
2658 static int em_clts(struct x86_emulate_ctxt *ctxt)
2659 {
2660 ulong cr0;
2661
2662 cr0 = ctxt->ops->get_cr(ctxt, 0);
2663 cr0 &= ~X86_CR0_TS;
2664 ctxt->ops->set_cr(ctxt, 0, cr0);
2665 return X86EMUL_CONTINUE;
2666 }
2667
2668 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
2669 {
2670 int rc;
2671
2672 if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
2673 return X86EMUL_UNHANDLEABLE;
2674
2675 rc = ctxt->ops->fix_hypercall(ctxt);
2676 if (rc != X86EMUL_CONTINUE)
2677 return rc;
2678
2679 /* Let the processor re-execute the fixed hypercall */
2680 ctxt->_eip = ctxt->eip;
2681 /* Disable writeback. */
2682 ctxt->dst.type = OP_NONE;
2683 return X86EMUL_CONTINUE;
2684 }
2685
2686 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
2687 {
2688 struct desc_ptr desc_ptr;
2689 int rc;
2690
2691 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2692 &desc_ptr.size, &desc_ptr.address,
2693 ctxt->op_bytes);
2694 if (rc != X86EMUL_CONTINUE)
2695 return rc;
2696 ctxt->ops->set_gdt(ctxt, &desc_ptr);
2697 /* Disable writeback. */
2698 ctxt->dst.type = OP_NONE;
2699 return X86EMUL_CONTINUE;
2700 }
2701
2702 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
2703 {
2704 int rc;
2705
2706 rc = ctxt->ops->fix_hypercall(ctxt);
2707
2708 /* Disable writeback. */
2709 ctxt->dst.type = OP_NONE;
2710 return rc;
2711 }
2712
2713 static int em_lidt(struct x86_emulate_ctxt *ctxt)
2714 {
2715 struct desc_ptr desc_ptr;
2716 int rc;
2717
2718 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2719 &desc_ptr.size, &desc_ptr.address,
2720 ctxt->op_bytes);
2721 if (rc != X86EMUL_CONTINUE)
2722 return rc;
2723 ctxt->ops->set_idt(ctxt, &desc_ptr);
2724 /* Disable writeback. */
2725 ctxt->dst.type = OP_NONE;
2726 return X86EMUL_CONTINUE;
2727 }
2728
2729 static int em_smsw(struct x86_emulate_ctxt *ctxt)
2730 {
2731 ctxt->dst.bytes = 2;
2732 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
2733 return X86EMUL_CONTINUE;
2734 }
2735
2736 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
2737 {
2738 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
2739 | (ctxt->src.val & 0x0f));
2740 ctxt->dst.type = OP_NONE;
2741 return X86EMUL_CONTINUE;
2742 }
2743
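/*
 * LOOP/LOOPE/LOOPNE (0xe2/0xe1/0xe0) branch while rCX != 0; for the
 * conditional forms, b ^ 0x5 yields condition code 4 (ZF set) for
 * LOOPE and 5 (ZF clear) for LOOPNE, as expected by test_cc().
 */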
2744 static int em_loop(struct x86_emulate_ctxt *ctxt)
2745 {
2746 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
2747 if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
2748 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
2749 jmp_rel(ctxt, ctxt->src.val);
2750
2751 return X86EMUL_CONTINUE;
2752 }
2753
2754 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
2755 {
2756 if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
2757 jmp_rel(ctxt, ctxt->src.val);
2758
2759 return X86EMUL_CONTINUE;
2760 }
2761
2762 static int em_cli(struct x86_emulate_ctxt *ctxt)
2763 {
2764 if (emulator_bad_iopl(ctxt))
2765 return emulate_gp(ctxt, 0);
2766
2767 ctxt->eflags &= ~X86_EFLAGS_IF;
2768 return X86EMUL_CONTINUE;
2769 }
2770
2771 static int em_sti(struct x86_emulate_ctxt *ctxt)
2772 {
2773 if (emulator_bad_iopl(ctxt))
2774 return emulate_gp(ctxt, 0);
2775
2776 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
2777 ctxt->eflags |= X86_EFLAGS_IF;
2778 return X86EMUL_CONTINUE;
2779 }
2780
2781 static bool valid_cr(int nr)
2782 {
2783 switch (nr) {
2784 case 0:
2785 case 2 ... 4:
2786 case 8:
2787 return true;
2788 default:
2789 return false;
2790 }
2791 }
2792
2793 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
2794 {
2795 if (!valid_cr(ctxt->modrm_reg))
2796 return emulate_ud(ctxt);
2797
2798 return X86EMUL_CONTINUE;
2799 }
2800
2801 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
2802 {
2803 u64 new_val = ctxt->src.val64;
2804 int cr = ctxt->modrm_reg;
2805 u64 efer = 0;
2806
2807 static u64 cr_reserved_bits[] = {
2808 0xffffffff00000000ULL,
2809 0, 0, 0, /* CR3 checked later */
2810 CR4_RESERVED_BITS,
2811 0, 0, 0,
2812 CR8_RESERVED_BITS,
2813 };
2814
2815 if (!valid_cr(cr))
2816 return emulate_ud(ctxt);
2817
2818 if (new_val & cr_reserved_bits[cr])
2819 return emulate_gp(ctxt, 0);
2820
2821 switch (cr) {
2822 case 0: {
2823 u64 cr4;
2824 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
2825 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
2826 return emulate_gp(ctxt, 0);
2827
2828 cr4 = ctxt->ops->get_cr(ctxt, 4);
2829 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2830
2831 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
2832 !(cr4 & X86_CR4_PAE))
2833 return emulate_gp(ctxt, 0);
2834
2835 break;
2836 }
2837 case 3: {
2838 u64 rsvd = 0;
2839
2840 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2841 if (efer & EFER_LMA)
2842 rsvd = CR3_L_MODE_RESERVED_BITS;
2843 else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
2844 rsvd = CR3_PAE_RESERVED_BITS;
2845 else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
2846 rsvd = CR3_NONPAE_RESERVED_BITS;
2847
2848 if (new_val & rsvd)
2849 return emulate_gp(ctxt, 0);
2850
2851 break;
2852 }
2853 case 4: {
2854 u64 cr4;
2855
2856 cr4 = ctxt->ops->get_cr(ctxt, 4);
2857 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2858
2859 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
2860 return emulate_gp(ctxt, 0);
2861
2862 break;
2863 }
2864 }
2865
2866 return X86EMUL_CONTINUE;
2867 }
2868
2869 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
2870 {
2871 unsigned long dr7;
2872
2873 ctxt->ops->get_dr(ctxt, 7, &dr7);
2874
2875 /* Check if DR7.GD (general detect) is set */
2876 return dr7 & (1 << 13);
2877 }
2878
2879 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
2880 {
2881 int dr = ctxt->modrm_reg;
2882 u64 cr4;
2883
2884 if (dr > 7)
2885 return emulate_ud(ctxt);
2886
2887 cr4 = ctxt->ops->get_cr(ctxt, 4);
2888 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
2889 return emulate_ud(ctxt);
2890
2891 if (check_dr7_gd(ctxt))
2892 return emulate_db(ctxt);
2893
2894 return X86EMUL_CONTINUE;
2895 }
2896
2897 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
2898 {
2899 u64 new_val = ctxt->src.val64;
2900 int dr = ctxt->modrm_reg;
2901
2902 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
2903 return emulate_gp(ctxt, 0);
2904
2905 return check_dr_read(ctxt);
2906 }
2907
2908 static int check_svme(struct x86_emulate_ctxt *ctxt)
2909 {
2910 u64 efer;
2911
2912 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2913
2914 if (!(efer & EFER_SVME))
2915 return emulate_ud(ctxt);
2916
2917 return X86EMUL_CONTINUE;
2918 }
2919
2920 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
2921 {
2922 u64 rax = ctxt->regs[VCPU_REGS_RAX];
2923
2924 /* Valid physical address? */
2925 if (rax & 0xffff000000000000ULL)
2926 return emulate_gp(ctxt, 0);
2927
2928 return check_svme(ctxt);
2929 }
2930
2931 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
2932 {
2933 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
2934
2935 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
2936 return emulate_ud(ctxt);
2937
2938 return X86EMUL_CONTINUE;
2939 }
2940
2941 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
2942 {
2943 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
2944 u64 rcx = ctxt->regs[VCPU_REGS_RCX];
2945
2946 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
2947 (rcx > 3))
2948 return emulate_gp(ctxt, 0);
2949
2950 return X86EMUL_CONTINUE;
2951 }
2952
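/* IN/OUT transfer at most 4 bytes (al/ax/eax). */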
2953 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
2954 {
2955 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
2956 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
2957 return emulate_gp(ctxt, 0);
2958
2959 return X86EMUL_CONTINUE;
2960 }
2961
2962 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
2963 {
2964 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
2965 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
2966 return emulate_gp(ctxt, 0);
2967
2968 return X86EMUL_CONTINUE;
2969 }
2970
2971 #define D(_y) { .flags = (_y) }
2972 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
2973 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
2974 .check_perm = (_p) }
2975 #define N D(0)
2976 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
2977 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
2978 #define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
2979 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
2980 #define II(_f, _e, _i) \
2981 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
2982 #define IIP(_f, _e, _i, _p) \
2983 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
2984 .check_perm = (_p) }
2985 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
2986
2987 #define D2bv(_f) D((_f) | ByteOp), D(_f)
2988 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
2989 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
2990
2991 #define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
2992 I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
2993 I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
2994
2995 static struct opcode group7_rm1[] = {
2996 DI(SrcNone | ModRM | Priv, monitor),
2997 DI(SrcNone | ModRM | Priv, mwait),
2998 N, N, N, N, N, N,
2999 };
3000
3001 static struct opcode group7_rm3[] = {
3002 DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
3003 II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
3004 DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
3005 DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
3006 DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
3007 DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
3008 DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
3009 DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
3010 };
3011
3012 static struct opcode group7_rm7[] = {
3013 N,
3014 DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
3015 N, N, N, N, N, N,
3016 };
3017
3018 static struct opcode group1[] = {
3019 I(Lock, em_add),
3020 I(Lock, em_or),
3021 I(Lock, em_adc),
3022 I(Lock, em_sbb),
3023 I(Lock, em_and),
3024 I(Lock, em_sub),
3025 I(Lock, em_xor),
3026 I(0, em_cmp),
3027 };
3028
3029 static struct opcode group1A[] = {
3030 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
3031 };
3032
3033 static struct opcode group3[] = {
3034 I(DstMem | SrcImm | ModRM, em_test),
3035 I(DstMem | SrcImm | ModRM, em_test),
3036 I(DstMem | SrcNone | ModRM | Lock, em_not),
3037 I(DstMem | SrcNone | ModRM | Lock, em_neg),
3038 I(SrcMem | ModRM, em_mul_ex),
3039 I(SrcMem | ModRM, em_imul_ex),
3040 I(SrcMem | ModRM, em_div_ex),
3041 I(SrcMem | ModRM, em_idiv_ex),
3042 };
3043
3044 static struct opcode group4[] = {
3045 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
3046 N, N, N, N, N, N,
3047 };
3048
3049 static struct opcode group5[] = {
3050 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
3051 D(SrcMem | ModRM | Stack),
3052 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
3053 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
3054 D(SrcMem | ModRM | Stack), N,
3055 };
3056
3057 static struct opcode group6[] = {
3058 DI(ModRM | Prot, sldt),
3059 DI(ModRM | Prot, str),
3060 DI(ModRM | Prot | Priv, lldt),
3061 DI(ModRM | Prot | Priv, ltr),
3062 N, N, N, N,
3063 };
3064
3065 static struct group_dual group7 = { {
3066 DI(ModRM | Mov | DstMem | Priv, sgdt),
3067 DI(ModRM | Mov | DstMem | Priv, sidt),
3068 II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
3069 II(ModRM | SrcMem | Priv, em_lidt, lidt),
3070 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3071 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
3072 II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3073 }, {
3074 I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
3075 EXT(0, group7_rm1),
3076 N, EXT(0, group7_rm3),
3077 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3078 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
3079 } };
3080
3081 static struct opcode group8[] = {
3082 N, N, N, N,
3083 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
3084 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
3085 };
3086
3087 static struct group_dual group9 = { {
3088 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
3089 }, {
3090 N, N, N, N, N, N, N, N,
3091 } };
3092
3093 static struct opcode group11[] = {
3094 I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
3095 };
3096
3097 static struct gprefix pfx_0f_6f_0f_7f = {
3098 N, N, N, I(Sse, em_movdqu),
3099 };
3100
3101 static struct opcode opcode_table[256] = {
3102 /* 0x00 - 0x07 */
3103 I6ALU(Lock, em_add),
3104 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
3105 /* 0x08 - 0x0F */
3106 I6ALU(Lock, em_or),
3107 D(ImplicitOps | Stack | No64), N,
3108 /* 0x10 - 0x17 */
3109 I6ALU(Lock, em_adc),
3110 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
3111 /* 0x18 - 0x1F */
3112 I6ALU(Lock, em_sbb),
3113 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
3114 /* 0x20 - 0x27 */
3115 I6ALU(Lock, em_and), N, N,
3116 /* 0x28 - 0x2F */
3117 I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3118 /* 0x30 - 0x37 */
3119 I6ALU(Lock, em_xor), N, N,
3120 /* 0x38 - 0x3F */
3121 I6ALU(0, em_cmp), N, N,
3122 /* 0x40 - 0x4F */
3123 X16(D(DstReg)),
3124 /* 0x50 - 0x57 */
3125 X8(I(SrcReg | Stack, em_push)),
3126 /* 0x58 - 0x5F */
3127 X8(I(DstReg | Stack, em_pop)),
3128 /* 0x60 - 0x67 */
3129 I(ImplicitOps | Stack | No64, em_pusha),
3130 I(ImplicitOps | Stack | No64, em_popa),
3131 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3132 N, N, N, N,
3133 /* 0x68 - 0x6F */
3134 I(SrcImm | Mov | Stack, em_push),
3135 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3136 I(SrcImmByte | Mov | Stack, em_push),
3137 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3138 D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
3139 D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
3140 /* 0x70 - 0x7F */
3141 X16(D(SrcImmByte)),
3142 /* 0x80 - 0x87 */
3143 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
3144 G(DstMem | SrcImm | ModRM | Group, group1),
3145 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
3146 G(DstMem | SrcImmByte | ModRM | Group, group1),
3147 I2bv(DstMem | SrcReg | ModRM, em_test),
3148 I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
3149 /* 0x88 - 0x8F */
3150 I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
3151 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3152 I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
3153 D(ModRM | SrcMem | NoAccess | DstReg),
3154 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3155 G(0, group1A),
3156 /* 0x90 - 0x97 */
3157 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3158 /* 0x98 - 0x9F */
3159 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3160 I(SrcImmFAddr | No64, em_call_far), N,
3161 II(ImplicitOps | Stack, em_pushf, pushf),
3162 II(ImplicitOps | Stack, em_popf, popf), N, N,
3163 /* 0xA0 - 0xA7 */
3164 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3165 I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
3166 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3167 I2bv(SrcSI | DstDI | String, em_cmp),
3168 /* 0xA8 - 0xAF */
3169 I2bv(DstAcc | SrcImm, em_test),
3170 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3171 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3172 I2bv(SrcAcc | DstDI | String, em_cmp),
3173 /* 0xB0 - 0xB7 */
3174 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3175 /* 0xB8 - 0xBF */
3176 X8(I(DstReg | SrcImm | Mov, em_mov)),
3177 /* 0xC0 - 0xC7 */
3178 D2bv(DstMem | SrcImmByte | ModRM),
3179 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3180 I(ImplicitOps | Stack, em_ret),
3181 D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
3182 G(ByteOp, group11), G(0, group11),
3183 /* 0xC8 - 0xCF */
3184 N, N, N, I(ImplicitOps | Stack, em_ret_far),
3185 D(ImplicitOps), DI(SrcImmByte, intn),
3186 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3187 /* 0xD0 - 0xD7 */
3188 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
3189 N, N, N, N,
3190 /* 0xD8 - 0xDF */
3191 N, N, N, N, N, N, N, N,
3192 /* 0xE0 - 0xE7 */
3193 X3(I(SrcImmByte, em_loop)),
3194 I(SrcImmByte, em_jcxz),
3195 D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in),
3196 D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
3197 /* 0xE8 - 0xEF */
3198 D(SrcImm | Stack), D(SrcImm | ImplicitOps),
3199 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
3200 D2bvIP(SrcDX | DstAcc, in, check_perm_in),
3201 D2bvIP(SrcAcc | DstDX, out, check_perm_out),
3202 /* 0xF0 - 0xF7 */
3203 N, DI(ImplicitOps, icebp), N, N,
3204 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3205 G(ByteOp, group3), G(0, group3),
3206 /* 0xF8 - 0xFF */
3207 D(ImplicitOps), D(ImplicitOps),
3208 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
3209 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3210 };
3211
3212 static struct opcode twobyte_table[256] = {
3213 /* 0x00 - 0x0F */
3214 G(0, group6), GD(0, &group7), N, N,
3215 N, I(ImplicitOps | VendorSpecific, em_syscall),
3216 II(ImplicitOps | Priv, em_clts, clts), N,
3217 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3218 N, D(ImplicitOps | ModRM), N, N,
3219 /* 0x10 - 0x1F */
3220 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
3221 /* 0x20 - 0x2F */
3222 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3223 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3224 DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
3225 DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
3226 N, N, N, N,
3227 N, N, N, N, N, N, N, N,
3228 /* 0x30 - 0x3F */
3229 DI(ImplicitOps | Priv, wrmsr),
3230 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3231 DI(ImplicitOps | Priv, rdmsr),
3232 DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
3233 I(ImplicitOps | VendorSpecific, em_sysenter),
3234 I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
3235 N, N,
3236 N, N, N, N, N, N, N, N,
3237 /* 0x40 - 0x4F */
3238 X16(D(DstReg | SrcMem | ModRM | Mov)),
3239 /* 0x50 - 0x5F */
3240 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3241 /* 0x60 - 0x6F */
3242 N, N, N, N,
3243 N, N, N, N,
3244 N, N, N, N,
3245 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3246 /* 0x70 - 0x7F */
3247 N, N, N, N,
3248 N, N, N, N,
3249 N, N, N, N,
3250 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3251 /* 0x80 - 0x8F */
3252 X16(D(SrcImm)),
3253 /* 0x90 - 0x9F */
3254 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
3255 /* 0xA0 - 0xA7 */
3256 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
3257 DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
3258 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3259 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
3260 /* 0xA8 - 0xAF */
3261 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
3262 DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3263 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3264 D(DstMem | SrcReg | Src2CL | ModRM),
3265 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
3266 /* 0xB0 - 0xB7 */
3267 D2bv(DstMem | SrcReg | ModRM | Lock),
3268 D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3269 D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
3270 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3271 /* 0xB8 - 0xBF */
3272 N, N,
3273 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3274 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
3275 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3276 /* 0xC0 - 0xCF */
3277 D2bv(DstMem | SrcReg | ModRM | Lock),
3278 N, D(DstMem | SrcReg | ModRM | Mov),
3279 N, N, N, GD(0, &group9),
3280 N, N, N, N, N, N, N, N,
3281 /* 0xD0 - 0xDF */
3282 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3283 /* 0xE0 - 0xEF */
3284 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3285 /* 0xF0 - 0xFF */
3286 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3287 };
3288
3289 #undef D
3290 #undef N
3291 #undef G
3292 #undef GD
3293 #undef I
3294 #undef GP
3295 #undef EXT
3296
3297 #undef D2bv
3298 #undef D2bvIP
3299 #undef I2bv
3300 #undef I6ALU
3301
3302 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
3303 {
3304 unsigned size;
3305
3306 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
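/*
 * No 8-byte immediates are fetched; a 64-bit operand size falls back
 * to a 4-byte immediate.
 */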
3307 if (size == 8)
3308 size = 4;
3309 return size;
3310 }
3311
3312 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3313 unsigned size, bool sign_extension)
3314 {
3315 int rc = X86EMUL_CONTINUE;
3316
3317 op->type = OP_IMM;
3318 op->bytes = size;
3319 op->addr.mem.ea = ctxt->_eip;
3320 /* NB. Immediates are sign-extended as necessary. */
3321 switch (op->bytes) {
3322 case 1:
3323 op->val = insn_fetch(s8, ctxt);
3324 break;
3325 case 2:
3326 op->val = insn_fetch(s16, ctxt);
3327 break;
3328 case 4:
3329 op->val = insn_fetch(s32, ctxt);
3330 break;
3331 }
3332 if (!sign_extension) {
3333 switch (op->bytes) {
3334 case 1:
3335 op->val &= 0xff;
3336 break;
3337 case 2:
3338 op->val &= 0xffff;
3339 break;
3340 case 4:
3341 op->val &= 0xffffffff;
3342 break;
3343 }
3344 }
3345 done:
3346 return rc;
3347 }
3348
3349 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
3350 unsigned d)
3351 {
3352 int rc = X86EMUL_CONTINUE;
3353
3354 switch (d) {
3355 case OpReg:
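/*
 * movzx (0f b6/b7) always writes a full-sized destination register,
 * so inhibit the ByteOp sizing that applies to its source.
 */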
3356 decode_register_operand(ctxt, op,
3357 op == &ctxt->dst &&
3358 ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7));
3359 break;
3360 case OpImmUByte:
3361 rc = decode_imm(ctxt, op, 1, false);
3362 break;
3363 case OpMem:
3364 case OpMem64:
3365 if (d == OpMem64)
3366 ctxt->memop.bytes = 8;
3367 else
3368 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3369 mem_common:
3370 *op = ctxt->memop;
3371 ctxt->memopp = op;
3372 if ((ctxt->d & BitOp) && op == &ctxt->dst)
3373 fetch_bit_operand(ctxt);
3374 op->orig_val = op->val;
3375 break;
3376 case OpAcc:
3377 op->type = OP_REG;
3378 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3379 op->addr.reg = &ctxt->regs[VCPU_REGS_RAX];
3380 fetch_register_operand(op);
3381 op->orig_val = op->val;
3382 break;
3383 case OpDI:
3384 op->type = OP_MEM;
3385 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3386 op->addr.mem.ea =
3387 register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
3388 op->addr.mem.seg = VCPU_SREG_ES;
3389 op->val = 0;
3390 break;
3391 case OpDX:
3392 op->type = OP_REG;
3393 op->bytes = 2;
3394 op->addr.reg = &ctxt->regs[VCPU_REGS_RDX];
3395 fetch_register_operand(op);
3396 break;
3397 case OpCL:
3398 op->bytes = 1;
3399 op->val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
3400 break;
3401 case OpImmByte:
3402 rc = decode_imm(ctxt, op, 1, true);
3403 break;
3404 case OpOne:
3405 op->bytes = 1;
3406 op->val = 1;
3407 break;
3408 case OpImm:
3409 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
3410 break;
3411 case OpMem16:
3412 ctxt->memop.bytes = 2;
3413 goto mem_common;
3414 case OpMem32:
3415 ctxt->memop.bytes = 4;
3416 goto mem_common;
3417 case OpImmU16:
3418 rc = decode_imm(ctxt, op, 2, false);
3419 break;
3420 case OpImmU:
3421 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
3422 break;
3423 case OpSI:
3424 op->type = OP_MEM;
3425 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3426 op->addr.mem.ea =
3427 register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
3428 op->addr.mem.seg = seg_override(ctxt);
3429 op->val = 0;
3430 break;
3431 case OpImmFAddr:
3432 op->type = OP_IMM;
3433 op->addr.mem.ea = ctxt->_eip;
3434 op->bytes = ctxt->op_bytes + 2;
3435 insn_fetch_arr(op->valptr, op->bytes, ctxt);
3436 break;
3437 case OpMemFAddr:
3438 ctxt->memop.bytes = ctxt->op_bytes + 2;
3439 goto mem_common;
3440 case OpImplicit:
3441 /* Special instructions do their own operand decoding. */
3442 default:
3443 op->type = OP_NONE; /* Disable writeback. */
3444 break;
3445 }
3446
3447 done:
3448 return rc;
3449 }
3450
3451 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
3452 {
3453 int rc = X86EMUL_CONTINUE;
3454 int mode = ctxt->mode;
3455 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
3456 bool op_prefix = false;
3457 struct opcode opcode;
3458
3459 ctxt->memop.type = OP_NONE;
3460 ctxt->memopp = NULL;
3461 ctxt->_eip = ctxt->eip;
3462 ctxt->fetch.start = ctxt->_eip;
3463 ctxt->fetch.end = ctxt->fetch.start + insn_len;
3464 if (insn_len > 0)
3465 memcpy(ctxt->fetch.data, insn, insn_len);
3466
3467 switch (mode) {
3468 case X86EMUL_MODE_REAL:
3469 case X86EMUL_MODE_VM86:
3470 case X86EMUL_MODE_PROT16:
3471 def_op_bytes = def_ad_bytes = 2;
3472 break;
3473 case X86EMUL_MODE_PROT32:
3474 def_op_bytes = def_ad_bytes = 4;
3475 break;
3476 #ifdef CONFIG_X86_64
3477 case X86EMUL_MODE_PROT64:
3478 def_op_bytes = 4;
3479 def_ad_bytes = 8;
3480 break;
3481 #endif
3482 default:
3483 return EMULATION_FAILED;
3484 }
3485
3486 ctxt->op_bytes = def_op_bytes;
3487 ctxt->ad_bytes = def_ad_bytes;
3488
3489 /* Legacy prefixes. */
3490 for (;;) {
3491 switch (ctxt->b = insn_fetch(u8, ctxt)) {
3492 case 0x66: /* operand-size override */
3493 op_prefix = true;
3494 /* switch between 2/4 bytes */
3495 ctxt->op_bytes = def_op_bytes ^ 6;
3496 break;
3497 case 0x67: /* address-size override */
3498 if (mode == X86EMUL_MODE_PROT64)
3499 /* switch between 4/8 bytes */
3500 ctxt->ad_bytes = def_ad_bytes ^ 12;
3501 else
3502 /* switch between 2/4 bytes */
3503 ctxt->ad_bytes = def_ad_bytes ^ 6;
3504 break;
3505 case 0x26: /* ES override */
3506 case 0x2e: /* CS override */
3507 case 0x36: /* SS override */
3508 case 0x3e: /* DS override */
3509 set_seg_override(ctxt, (ctxt->b >> 3) & 3);
3510 break;
3511 case 0x64: /* FS override */
3512 case 0x65: /* GS override */
3513 set_seg_override(ctxt, ctxt->b & 7);
3514 break;
3515 case 0x40 ... 0x4f: /* REX */
3516 if (mode != X86EMUL_MODE_PROT64)
3517 goto done_prefixes;
3518 ctxt->rex_prefix = ctxt->b;
3519 continue;
3520 case 0xf0: /* LOCK */
3521 ctxt->lock_prefix = 1;
3522 break;
3523 case 0xf2: /* REPNE/REPNZ */
3524 case 0xf3: /* REP/REPE/REPZ */
3525 ctxt->rep_prefix = ctxt->b;
3526 break;
3527 default:
3528 goto done_prefixes;
3529 }
3530
3531 /* Any legacy prefix after a REX prefix nullifies its effect. */
3532
3533 ctxt->rex_prefix = 0;
3534 }
3535
3536 done_prefixes:
3537
3538 /* REX prefix. */
3539 if (ctxt->rex_prefix & 8)
3540 ctxt->op_bytes = 8; /* REX.W */
3541
3542 /* Opcode byte(s). */
3543 opcode = opcode_table[ctxt->b];
3544 /* Two-byte opcode? */
3545 if (ctxt->b == 0x0f) {
3546 ctxt->twobyte = 1;
3547 ctxt->b = insn_fetch(u8, ctxt);
3548 opcode = twobyte_table[ctxt->b];
3549 }
3550 ctxt->d = opcode.flags;
3551
3552 while (ctxt->d & GroupMask) {
3553 switch (ctxt->d & GroupMask) {
3554 case Group:
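/*
 * Peek at the ModRM byte to pick the group member, then back up
 * so the regular ModRM decode below fetches it again.
 */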
3555 ctxt->modrm = insn_fetch(u8, ctxt);
3556 --ctxt->_eip;
3557 goffset = (ctxt->modrm >> 3) & 7;
3558 opcode = opcode.u.group[goffset];
3559 break;
3560 case GroupDual:
3561 ctxt->modrm = insn_fetch(u8, ctxt);
3562 --ctxt->_eip;
3563 goffset = (ctxt->modrm >> 3) & 7;
3564 if ((ctxt->modrm >> 6) == 3)
3565 opcode = opcode.u.gdual->mod3[goffset];
3566 else
3567 opcode = opcode.u.gdual->mod012[goffset];
3568 break;
3569 case RMExt:
3570 goffset = ctxt->modrm & 7;
3571 opcode = opcode.u.group[goffset];
3572 break;
3573 case Prefix:
3574 if (ctxt->rep_prefix && op_prefix)
3575 return EMULATION_FAILED;
3576 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
3577 switch (simd_prefix) {
3578 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
3579 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
3580 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
3581 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
3582 }
3583 break;
3584 default:
3585 return EMULATION_FAILED;
3586 }
3587
3588 ctxt->d &= ~(u64)GroupMask;
3589 ctxt->d |= opcode.flags;
3590 }
3591
3592 ctxt->execute = opcode.u.execute;
3593 ctxt->check_perm = opcode.check_perm;
3594 ctxt->intercept = opcode.intercept;
3595
3596 /* Unrecognised? */
3597 if (ctxt->d == 0 || (ctxt->d & Undefined))
3598 return EMULATION_FAILED;
3599
3600 if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
3601 return EMULATION_FAILED;
3602
3603 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
3604 ctxt->op_bytes = 8;
3605
3606 if (ctxt->d & Op3264) {
3607 if (mode == X86EMUL_MODE_PROT64)
3608 ctxt->op_bytes = 8;
3609 else
3610 ctxt->op_bytes = 4;
3611 }
3612
3613 if (ctxt->d & Sse)
3614 ctxt->op_bytes = 16;
3615
3616 /* ModRM and SIB bytes. */
3617 if (ctxt->d & ModRM) {
3618 rc = decode_modrm(ctxt, &ctxt->memop);
3619 if (!ctxt->has_seg_override)
3620 set_seg_override(ctxt, ctxt->modrm_seg);
3621 } else if (ctxt->d & MemAbs)
3622 rc = decode_abs(ctxt, &ctxt->memop);
3623 if (rc != X86EMUL_CONTINUE)
3624 goto done;
3625
3626 if (!ctxt->has_seg_override)
3627 set_seg_override(ctxt, VCPU_SREG_DS);
3628
3629 ctxt->memop.addr.mem.seg = seg_override(ctxt);
3630
3631 if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
3632 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
3633
3634 /*
3635 * Decode and fetch the source operand: register, memory
3636 * or immediate.
3637 */
3638 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
3639 if (rc != X86EMUL_CONTINUE)
3640 goto done;
3641
3642 /*
3643 * Decode and fetch the second source operand: register, memory
3644 * or immediate.
3645 */
3646 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
3647 if (rc != X86EMUL_CONTINUE)
3648 goto done;
3649
3650 /* Decode and fetch the destination operand: register or memory. */
3651 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
3652
3653 done:
3654 if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
3655 ctxt->memopp->addr.mem.ea += ctxt->_eip;
3656
3657 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
3658 }
3659
3660 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3661 {
3662 /* The second termination condition only applies to REPE
3663 * and REPNE. If the repeat string operation prefix is
3664 * REPE/REPZ or REPNE/REPNZ, test the corresponding
3665 * termination condition:
3666 * - if REPE/REPZ and ZF = 0 then done
3667 * - if REPNE/REPNZ and ZF = 1 then done
3668 */
3669 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
3670 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
3671 && (((ctxt->rep_prefix == REPE_PREFIX) &&
3672 ((ctxt->eflags & EFLG_ZF) == 0))
3673 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
3674 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
3675 return true;
3676
3677 return false;
3678 }
3679
3680 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3681 {
3682 struct x86_emulate_ops *ops = ctxt->ops;
3683 u64 msr_data;
3684 int rc = X86EMUL_CONTINUE;
3685 int saved_dst_type = ctxt->dst.type;
3686
3687 ctxt->mem_read.pos = 0;
3688
3689 if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
3690 rc = emulate_ud(ctxt);
3691 goto done;
3692 }
3693
3694 /* LOCK prefix is allowed only with some instructions */
3695 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
3696 rc = emulate_ud(ctxt);
3697 goto done;
3698 }
3699
3700 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
3701 rc = emulate_ud(ctxt);
3702 goto done;
3703 }
3704
3705 if ((ctxt->d & Sse)
3706 && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
3707 || !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
3708 rc = emulate_ud(ctxt);
3709 goto done;
3710 }
3711
3712 if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
3713 rc = emulate_nm(ctxt);
3714 goto done;
3715 }
3716
3717 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3718 rc = emulator_check_intercept(ctxt, ctxt->intercept,
3719 X86_ICPT_PRE_EXCEPT);
3720 if (rc != X86EMUL_CONTINUE)
3721 goto done;
3722 }
3723
3724 /* Privileged instruction can be executed only in CPL=0 */
3725 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
3726 rc = emulate_gp(ctxt, 0);
3727 goto done;
3728 }
3729
3730 /* Instruction can only be executed in protected mode */
3731 if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
3732 rc = emulate_ud(ctxt);
3733 goto done;
3734 }
3735
3736 /* Do instruction specific permission checks */
3737 if (ctxt->check_perm) {
3738 rc = ctxt->check_perm(ctxt);
3739 if (rc != X86EMUL_CONTINUE)
3740 goto done;
3741 }
3742
3743 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3744 rc = emulator_check_intercept(ctxt, ctxt->intercept,
3745 X86_ICPT_POST_EXCEPT);
3746 if (rc != X86EMUL_CONTINUE)
3747 goto done;
3748 }
3749
3750 if (ctxt->rep_prefix && (ctxt->d & String)) {
3751 /* All REP prefixes have the same first termination condition */
3752 if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
3753 ctxt->eip = ctxt->_eip;
3754 goto done;
3755 }
3756 }
3757
3758 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
3759 rc = segmented_read(ctxt, ctxt->src.addr.mem,
3760 ctxt->src.valptr, ctxt->src.bytes);
3761 if (rc != X86EMUL_CONTINUE)
3762 goto done;
3763 ctxt->src.orig_val64 = ctxt->src.val64;
3764 }
3765
3766 if (ctxt->src2.type == OP_MEM) {
3767 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
3768 &ctxt->src2.val, ctxt->src2.bytes);
3769 if (rc != X86EMUL_CONTINUE)
3770 goto done;
3771 }
3772
3773 if ((ctxt->d & DstMask) == ImplicitOps)
3774 goto special_insn;
3775
3777 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
3778 /* optimisation - avoid slow emulated read if Mov */
3779 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
3780 &ctxt->dst.val, ctxt->dst.bytes);
3781 if (rc != X86EMUL_CONTINUE)
3782 goto done;
3783 }
3784 ctxt->dst.orig_val = ctxt->dst.val;
3785
3786 special_insn:
3787
3788 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3789 rc = emulator_check_intercept(ctxt, ctxt->intercept,
3790 X86_ICPT_POST_MEMACCESS);
3791 if (rc != X86EMUL_CONTINUE)
3792 goto done;
3793 }
3794
3795 if (ctxt->execute) {
3796 rc = ctxt->execute(ctxt);
3797 if (rc != X86EMUL_CONTINUE)
3798 goto done;
3799 goto writeback;
3800 }
3801
3802 if (ctxt->twobyte)
3803 goto twobyte_insn;
3804
3805 switch (ctxt->b) {
3806 case 0x06: /* push es */
3807 rc = emulate_push_sreg(ctxt, VCPU_SREG_ES);
3808 break;
3809 case 0x07: /* pop es */
3810 rc = emulate_pop_sreg(ctxt, VCPU_SREG_ES);
3811 break;
3812 case 0x0e: /* push cs */
3813 rc = emulate_push_sreg(ctxt, VCPU_SREG_CS);
3814 break;
3815 case 0x16: /* push ss */
3816 rc = emulate_push_sreg(ctxt, VCPU_SREG_SS);
3817 break;
3818 case 0x17: /* pop ss */
3819 rc = emulate_pop_sreg(ctxt, VCPU_SREG_SS);
3820 break;
3821 case 0x1e: /* push ds */
3822 rc = emulate_push_sreg(ctxt, VCPU_SREG_DS);
3823 break;
3824 case 0x1f: /* pop ds */
3825 rc = emulate_pop_sreg(ctxt, VCPU_SREG_DS);
3826 break;
3827 case 0x40 ... 0x47: /* inc r16/r32 */
3828 emulate_1op(ctxt, "inc");
3829 break;
3830 case 0x48 ... 0x4f: /* dec r16/r32 */
3831 emulate_1op(ctxt, "dec");
3832 break;
3833 case 0x63: /* movsxd */
3834 if (ctxt->mode != X86EMUL_MODE_PROT64)
3835 goto cannot_emulate;
3836 ctxt->dst.val = (s32) ctxt->src.val;
3837 break;
3838 case 0x6c: /* insb */
3839 case 0x6d: /* insw/insd */
3840 ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
3841 goto do_io_in;
3842 case 0x6e: /* outsb */
3843 case 0x6f: /* outsw/outsd */
3844 ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
3845 goto do_io_out;
3847 case 0x70 ... 0x7f: /* jcc (short) */
3848 if (test_cc(ctxt->b, ctxt->eflags))
3849 jmp_rel(ctxt, ctxt->src.val);
3850 break;
3851 case 0x8d: /* lea r16/r32, m */
3852 ctxt->dst.val = ctxt->src.addr.mem.ea;
3853 break;
3854 case 0x8f: /* pop (sole member of Grp1a) */
3855 rc = em_grp1a(ctxt);
3856 break;
3857 case 0x90 ... 0x97: /* nop / xchg reg, rax */
3858 if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
3859 break;
3860 rc = em_xchg(ctxt);
3861 break;
3862 case 0x98: /* cbw/cwde/cdqe */
3863 switch (ctxt->op_bytes) {
3864 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
3865 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
3866 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
3867 }
3868 break;
3869 case 0xc0 ... 0xc1:
3870 rc = em_grp2(ctxt);
3871 break;
3872 case 0xc4: /* les */
3873 rc = emulate_load_segment(ctxt, VCPU_SREG_ES);
3874 break;
3875 case 0xc5: /* lds */
3876 rc = emulate_load_segment(ctxt, VCPU_SREG_DS);
3877 break;
3878 case 0xcc: /* int3 */
3879 rc = emulate_int(ctxt, 3);
3880 break;
3881 case 0xcd: /* int n */
3882 rc = emulate_int(ctxt, ctxt->src.val);
3883 break;
3884 case 0xce: /* into */
3885 if (ctxt->eflags & EFLG_OF)
3886 rc = emulate_int(ctxt, 4);
3887 break;
3888 case 0xd0 ... 0xd1: /* Grp2 */
3889 rc = em_grp2(ctxt);
3890 break;
3891 case 0xd2 ... 0xd3: /* Grp2 */
3892 ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
3893 rc = em_grp2(ctxt);
3894 break;
3895 case 0xe4: /* inb */
3896 case 0xe5: /* in */
3897 goto do_io_in;
3898 case 0xe6: /* outb */
3899 case 0xe7: /* out */
3900 goto do_io_out;
3901 case 0xe8: /* call (near) */ {
3902 long int rel = ctxt->src.val;
3903 ctxt->src.val = (unsigned long) ctxt->_eip;
3904 jmp_rel(ctxt, rel);
3905 rc = em_push(ctxt);
3906 break;
3907 }
3908 case 0xe9: /* jmp rel */
3909 case 0xeb: /* jmp rel short */
3910 jmp_rel(ctxt, ctxt->src.val);
3911 ctxt->dst.type = OP_NONE; /* Disable writeback. */
3912 break;
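/*
 * pio_in_emulated() returns zero when the port data is not yet
 * available and the vcpu has to exit for the I/O to be performed;
 * emulation stops here and is restarted once the read-ahead buffer
 * has been filled.
 */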
3913 case 0xec: /* in al,dx */
3914 case 0xed: /* in (e/r)ax,dx */
3915 do_io_in:
3916 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3917 &ctxt->dst.val))
3918 goto done; /* IO is needed */
3919 break;
3920 case 0xee: /* out dx,al */
3921 case 0xef: /* out dx,(e/r)ax */
3922 do_io_out:
3923 ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3924 &ctxt->src.val, 1);
3925 ctxt->dst.type = OP_NONE; /* Disable writeback. */
3926 break;
3927 case 0xf4: /* hlt */
3928 ctxt->ops->halt(ctxt);
3929 break;
3930 case 0xf5: /* cmc */
3931 /* complement the carry flag in eflags */
3932 ctxt->eflags ^= EFLG_CF;
3933 break;
3934 case 0xf8: /* clc */
3935 ctxt->eflags &= ~EFLG_CF;
3936 break;
3937 case 0xf9: /* stc */
3938 ctxt->eflags |= EFLG_CF;
3939 break;
3940 case 0xfc: /* cld */
3941 ctxt->eflags &= ~EFLG_DF;
3942 break;
3943 case 0xfd: /* std */
3944 ctxt->eflags |= EFLG_DF;
3945 break;
3946 case 0xfe: /* Grp4 */
3947 rc = em_grp45(ctxt);
3948 break;
3949 case 0xff: /* Grp5 */
3950 rc = em_grp45(ctxt);
3951 break;
3952 default:
3953 goto cannot_emulate;
3954 }
3955
3956 if (rc != X86EMUL_CONTINUE)
3957 goto done;
3958
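/*
 * Writeback commits the computed result to its register or memory
 * destination only after the instruction has fully succeeded, so a
 * faulting instruction leaves the guest-visible state unmodified.
 */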
3959 writeback:
3960 rc = writeback(ctxt);
3961 if (rc != X86EMUL_CONTINUE)
3962 goto done;
3963
3964 /*
3965 * restore dst type in case the decode is reused
3966 * (happens for string instructions)
3967 */
3968 ctxt->dst.type = saved_dst_type;
3969
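/*
 * String instructions step SI/DI by the element size after every
 * iteration, backwards if EFLG_DF is set; the destination always uses
 * ES while the source honours segment overrides.  E.g. movsd with DF
 * clear adds 4 to both RSI and RDI.
 */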
3970 if ((ctxt->d & SrcMask) == SrcSI)
3971 string_addr_inc(ctxt, seg_override(ctxt),
3972 VCPU_REGS_RSI, &ctxt->src);
3973
3974 if ((ctxt->d & DstMask) == DstDI)
3975 string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
3976 &ctxt->dst);
3977
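/*
 * A rep-prefixed string instruction is emulated one element at a time:
 * RCX is decremented and, while the count is not exhausted,
 * EMULATION_RESTART reruns the cached decode without re-entering the
 * guest.  The RCX & 0x3ff test below forces a guest re-entry every
 * 1024 iterations so pending events can still be injected.
 */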
3978 if (ctxt->rep_prefix && (ctxt->d & String)) {
3979 struct read_cache *r = &ctxt->io_read;
3980 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
3981
3982 if (!string_insn_completed(ctxt)) {
3983 /*
3984 * Re-enter guest when pio read ahead buffer is empty
3985 * or, if it is not used, after every 1024 iterations.
3986 */
3987 if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
3988 (r->end == 0 || r->end != r->pos)) {
3989 /*
3990 * Reset read cache. Usually happens before
3991 * decode, but since the instruction is restarted
3992 * we have to do it here.
3993 */
3994 ctxt->mem_read.end = 0;
3995 return EMULATION_RESTART;
3996 }
3997 goto done; /* skip rip writeback */
3998 }
3999 }
4000
4001 ctxt->eip = ctxt->_eip;
4002
4003 done:
4004 if (rc == X86EMUL_PROPAGATE_FAULT)
4005 ctxt->have_exception = true;
4006 if (rc == X86EMUL_INTERCEPTED)
4007 return EMULATION_INTERCEPTED;
4008
4009 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
4010
4011 twobyte_insn:
4012 switch (ctxt->b) {
4013 case 0x09: /* wbinvd */
4014 (ctxt->ops->wbinvd)(ctxt);
4015 break;
4016 case 0x08: /* invd */
4017 case 0x0d: /* GrpP (prefetch) */
4018 case 0x18: /* Grp16 (prefetch/nop) */
4019 break;
4020 case 0x20: /* mov cr, reg */
4021 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
4022 break;
4023 case 0x21: /* mov from dr to reg */
4024 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
4025 break;
4026 case 0x22: /* mov reg, cr */
4027 if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
4028 emulate_gp(ctxt, 0);
4029 rc = X86EMUL_PROPAGATE_FAULT;
4030 goto done;
4031 }
4032 ctxt->dst.type = OP_NONE;
4033 break;
4034 case 0x23: /* mov from reg to dr */
4035 if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
4036 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
4037 ~0ULL : ~0U)) < 0) {
4038 /* #UD condition is already handled by the code above */
4039 emulate_gp(ctxt, 0);
4040 rc = X86EMUL_PROPAGATE_FAULT;
4041 goto done;
4042 }
4043
4044 ctxt->dst.type = OP_NONE; /* no writeback */
4045 break;
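/*
 * wrmsr/rdmsr transfer the 64-bit MSR value through the EDX:EAX pair
 * with the MSR index taken from ECX; a failure reported by the ops
 * callback is reflected into the guest as #GP(0).
 */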
4046 case 0x30:
4047 /* wrmsr */
4048 msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
4049 | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
4050 if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
4051 emulate_gp(ctxt, 0);
4052 rc = X86EMUL_PROPAGATE_FAULT;
4053 goto done;
4054 }
4055 rc = X86EMUL_CONTINUE;
4056 break;
4057 case 0x32:
4058 /* rdmsr */
4059 if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
4060 emulate_gp(ctxt, 0);
4061 rc = X86EMUL_PROPAGATE_FAULT;
4062 goto done;
4063 } else {
4064 ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
4065 ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
4066 }
4067 rc = X86EMUL_CONTINUE;
4068 break;
4069 case 0x40 ... 0x4f: /* cmov */
4070 ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
4071 if (!test_cc(ctxt->b, ctxt->eflags))
4072 ctxt->dst.type = OP_NONE; /* no writeback */
4073 break;
4074 case 0x80 ... 0x8f: /* jnz rel, etc. */
4075 if (test_cc(ctxt->b, ctxt->eflags))
4076 jmp_rel(ctxt, ctxt->src.val);
4077 break;
4078 case 0x90 ... 0x9f: /* setcc r/m8 */
4079 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
4080 break;
4081 case 0xa0: /* push fs */
4082 rc = emulate_push_sreg(ctxt, VCPU_SREG_FS);
4083 break;
4084 case 0xa1: /* pop fs */
4085 rc = emulate_pop_sreg(ctxt, VCPU_SREG_FS);
4086 break;
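/*
 * The bit-test family only handles the register-style subword offset
 * here: the offset is reduced modulo the operand width via the mask
 * (dst.bytes << 3) - 1, i.e. 15, 31 or 63.  E.g. a bit offset of 35 on
 * a 16-bit operand tests bit 35 & 15 = 3.
 */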
4087 case 0xa3:
4088 bt: /* bt */
4089 ctxt->dst.type = OP_NONE;
4090 /* only subword offset */
4091 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
4092 emulate_2op_SrcV_nobyte(ctxt, "bt");
4093 break;
4094 case 0xa4: /* shld imm8, r, r/m */
4095 case 0xa5: /* shld cl, r, r/m */
4096 emulate_2op_cl(ctxt, "shld");
4097 break;
4098 case 0xa8: /* push gs */
4099 rc = emulate_push_sreg(ctxt, VCPU_SREG_GS);
4100 break;
4101 case 0xa9: /* pop gs */
4102 rc = emulate_pop_sreg(ctxt, VCPU_SREG_GS);
4103 break;
4104 case 0xab:
4105 bts: /* bts */
4106 emulate_2op_SrcV_nobyte(ctxt, "bts");
4107 break;
4108 case 0xac: /* shrd imm8, r, r/m */
4109 case 0xad: /* shrd cl, r, r/m */
4110 emulate_2op_cl(ctxt, "shrd");
4111 break;
4112 case 0xae: /* clflush */
4113 break;
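/*
 * cmpxchg: EAX is compared with the destination.  If they are equal
 * (ZF set by the emulated "cmp"), the guest's new value is stored to
 * the destination; otherwise writeback is redirected so the
 * destination's current value ends up in EAX, as on real hardware.
 */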
4114 case 0xb0 ... 0xb1: /* cmpxchg */
4115 /*
4116 * Save real source value, then compare EAX against
4117 * destination.
4118 */
4119 ctxt->src.orig_val = ctxt->src.val;
4120 ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
4121 emulate_2op_SrcV(ctxt, "cmp");
4122 if (ctxt->eflags & EFLG_ZF) {
4123 /* Success: write back to memory. */
4124 ctxt->dst.val = ctxt->src.orig_val;
4125 } else {
4126 /* Failure: write the value we saw to EAX. */
4127 ctxt->dst.type = OP_REG;
4128 ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
4129 }
4130 break;
4131 case 0xb2: /* lss */
4132 rc = emulate_load_segment(ctxt, VCPU_SREG_SS);
4133 break;
4134 case 0xb3:
4135 btr: /* btr */
4136 emulate_2op_SrcV_nobyte(ctxt, "btr");
4137 break;
4138 case 0xb4: /* lfs */
4139 rc = emulate_load_segment(ctxt, VCPU_SREG_FS);
4140 break;
4141 case 0xb5: /* lgs */
4142 rc = emulate_load_segment(ctxt, VCPU_SREG_GS);
4143 break;
4144 case 0xb6 ... 0xb7: /* movzx */
4145 ctxt->dst.bytes = ctxt->op_bytes;
4146 ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
4147 : (u16) ctxt->src.val;
4148 break;
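/*
 * Grp8 (0f ba) selects the bit operation via modrm_reg: /4 bt, /5 bts,
 * /6 btr, /7 btc.  Masking with 3 folds those encodings onto the
 * labelled cases above and below.
 */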
4149 case 0xba: /* Grp8 */
4150 switch (ctxt->modrm_reg & 3) {
4151 case 0:
4152 goto bt;
4153 case 1:
4154 goto bts;
4155 case 2:
4156 goto btr;
4157 case 3:
4158 goto btc;
4159 }
4160 break;
4161 case 0xbb:
4162 btc: /* btc */
4163 emulate_2op_SrcV_nobyte(ctxt, "btc");
4164 break;
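/*
 * bsf/bsr are delegated to the equivalent host instruction.  Their
 * destination is architecturally undefined when the source is zero, so
 * ZF is propagated back by hand and writeback is suppressed in that
 * case.
 */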
4165 case 0xbc: { /* bsf */
4166 u8 zf;
4167 __asm__ ("bsf %2, %0; setz %1"
4168 : "=r"(ctxt->dst.val), "=q"(zf)
4169 : "r"(ctxt->src.val));
4170 ctxt->eflags &= ~X86_EFLAGS_ZF;
4171 if (zf) {
4172 ctxt->eflags |= X86_EFLAGS_ZF;
4173 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4174 }
4175 break;
4176 }
4177 case 0xbd: { /* bsr */
4178 u8 zf;
4179 __asm__ ("bsr %2, %0; setz %1"
4180 : "=r"(ctxt->dst.val), "=q"(zf)
4181 : "r"(ctxt->src.val));
4182 ctxt->eflags &= ~X86_EFLAGS_ZF;
4183 if (zf) {
4184 ctxt->eflags |= X86_EFLAGS_ZF;
4185 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4186 }
4187 break;
4188 }
4189 case 0xbe ... 0xbf: /* movsx */
4190 ctxt->dst.bytes = ctxt->op_bytes;
4191 ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
4192 (s16) ctxt->src.val;
4193 break;
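/*
 * xadd: exchange-and-add.  The sum is written to the destination,
 * while the destination's original value (saved in dst.orig_val before
 * execution) is written back to the source register.
 */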
4194 case 0xc0 ... 0xc1: /* xadd */
4195 emulate_2op_SrcV(ctxt, "add");
4196 /* Write back the register source. */
4197 ctxt->src.val = ctxt->dst.orig_val;
4198 write_register_operand(&ctxt->src);
4199 break;
4200 case 0xc3: /* movnti */
4201 ctxt->dst.bytes = ctxt->op_bytes;
4202 ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
4203 (u64) ctxt->src.val;
4204 break;
4205 case 0xc7: /* Grp9 (cmpxchg8b) */
4206 rc = em_grp9(ctxt);
4207 break;
4208 default:
4209 goto cannot_emulate;
4210 }
4211
4212 if (rc != X86EMUL_CONTINUE)
4213 goto done;
4214
4215 goto writeback;
4216
4217 cannot_emulate:
4218 return EMULATION_FAILED;
4219 }