KVM: x86 emulator: fix INTn emulation not pushing EFLAGS and CS
[deliverable/linux.git] arch/x86/kvm/emulate.c
1 /******************************************************************************
2 * emulate.c
3 *
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5 *
6 * Copyright (c) 2005 Keir Fraser
7 *
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
10 *
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
13 *
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
16 *
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
19 *
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
21 */
22
23 #ifndef __KERNEL__
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <public/xen.h>
27 #define DPRINTF(_f, _a ...) printf(_f , ## _a)
28 #else
29 #include <linux/kvm_host.h>
30 #include "kvm_cache_regs.h"
31 #define DPRINTF(x...) do {} while (0)
32 #endif
33 #include <linux/module.h>
34 #include <asm/kvm_emulate.h>
35
36 #include "x86.h"
37 #include "tss.h"
38
39 /*
40 * Opcode effective-address decode tables.
41 * Note that we only emulate instructions that have at least one memory
42 * operand (excluding implicit stack references). We assume that stack
43 * references and instruction fetches will never occur in special memory
44 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
45 * not be handled.
46 */
47
48 /* Operand sizes: 8-bit operands or specified/overridden size. */
49 #define ByteOp (1<<0) /* 8-bit operands. */
50 /* Destination operand type. */
51 #define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
52 #define DstReg (2<<1) /* Register operand. */
53 #define DstMem (3<<1) /* Memory operand. */
54 #define DstAcc (4<<1) /* Destination Accumulator */
55 #define DstDI (5<<1) /* Destination is in ES:(E)DI */
56 #define DstMem64 (6<<1) /* 64bit memory operand */
57 #define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */
58 #define DstMask (7<<1)
59 /* Source operand type. */
60 #define SrcNone (0<<4) /* No source operand. */
61 #define SrcImplicit (0<<4) /* Source operand is implicit in the opcode. */
62 #define SrcReg (1<<4) /* Register operand. */
63 #define SrcMem (2<<4) /* Memory operand. */
64 #define SrcMem16 (3<<4) /* Memory operand (16-bit). */
65 #define SrcMem32 (4<<4) /* Memory operand (32-bit). */
66 #define SrcImm (5<<4) /* Immediate operand. */
67 #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
68 #define SrcOne (7<<4) /* Implied '1' */
69 #define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
70 #define SrcImmU (9<<4) /* Immediate operand, unsigned */
71 #define SrcSI (0xa<<4) /* Source is in the DS:RSI */
72 #define SrcImmFAddr (0xb<<4) /* Source is immediate far address */
73 #define SrcMemFAddr (0xc<<4) /* Source is far address in memory */
74 #define SrcAcc (0xd<<4) /* Source Accumulator */
75 #define SrcMask (0xf<<4)
76 /* Generic ModRM decode. */
77 #define ModRM (1<<8)
78 /* Destination is only written; never read. */
79 #define Mov (1<<9)
80 #define BitOp (1<<10)
81 #define MemAbs (1<<11) /* Memory operand is absolute displacement */
82 #define String (1<<12) /* String instruction (rep capable) */
83 #define Stack (1<<13) /* Stack instruction (push/pop) */
84 #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
85 #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
86 /* Misc flags */
87 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
88 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
89 #define Undefined (1<<25) /* No Such Instruction */
90 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
91 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
92 #define No64 (1<<28)
93 /* Source 2 operand type */
94 #define Src2None (0<<29)
95 #define Src2CL (1<<29)
96 #define Src2ImmByte (2<<29)
97 #define Src2One (3<<29)
98 #define Src2Mask (7<<29)
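/*
 * Illustrative example of how these flags compose (a hypothetical
 * decode-table entry, shown here only to clarify the encoding): a
 * ModRM-based byte move such as "mov r/m8, r8" would be described as
 *
 *	ByteOp | DstMem | SrcReg | ModRM | Mov
 *
 * i.e. an 8-bit operation whose destination comes from the ModRM
 * memory decode, whose source is a register, and whose destination
 * is only written, never read.
 */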
99
100 #define X2(x...) x, x
101 #define X3(x...) X2(x), x
102 #define X4(x...) X2(x), X2(x)
103 #define X5(x...) X4(x), x
104 #define X6(x...) X4(x), X2(x)
105 #define X7(x...) X4(x), X3(x)
106 #define X8(x...) X4(x), X4(x)
107 #define X16(x...) X8(x), X8(x)
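/*
 * These macros just repeat an initializer; e.g. X4(0) expands to
 * "0, 0, 0, 0". They keep large, regular opcode tables compact.
 */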
108
109 struct opcode {
110 u32 flags;
111 union {
112 int (*execute)(struct x86_emulate_ctxt *ctxt);
113 struct opcode *group;
114 struct group_dual *gdual;
115 } u;
116 };
117
118 struct group_dual {
119 struct opcode mod012[8];
120 struct opcode mod3[8];
121 };
122
123 /* EFLAGS bit definitions. */
124 #define EFLG_ID (1<<21)
125 #define EFLG_VIP (1<<20)
126 #define EFLG_VIF (1<<19)
127 #define EFLG_AC (1<<18)
128 #define EFLG_VM (1<<17)
129 #define EFLG_RF (1<<16)
130 #define EFLG_IOPL (3<<12)
131 #define EFLG_NT (1<<14)
132 #define EFLG_OF (1<<11)
133 #define EFLG_DF (1<<10)
134 #define EFLG_IF (1<<9)
135 #define EFLG_TF (1<<8)
136 #define EFLG_SF (1<<7)
137 #define EFLG_ZF (1<<6)
138 #define EFLG_AF (1<<4)
139 #define EFLG_PF (1<<2)
140 #define EFLG_CF (1<<0)
141
142 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
143 #define EFLG_RESERVED_ONE_MASK 2
144
145 /*
146 * Instruction emulation:
147 * Most instructions are emulated directly via a fragment of inline assembly
148 * code. This allows us to save/restore EFLAGS and thus very easily pick up
149 * any modified flags.
150 */
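/*
 * A minimal standalone sketch of that idea (illustrative only; the
 * real mechanism is the _PRE_EFLAGS/_POST_EFLAGS macros below, which
 * additionally restore the guest's flags before the instruction runs):
 *
 *	unsigned char dst, src;
 *	unsigned long flags;
 *
 *	asm("addb %2, %0; pushf; pop %1"
 *	    : "+m" (dst), "=r" (flags)
 *	    : "q" (src)
 *	    : "cc");
 *
 * After the host executes the instruction, "flags" holds the host
 * EFLAGS, from which the arithmetic bits (EFLAGS_MASK) can be copied
 * back into the guest's saved flags.
 */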
151
152 #if defined(CONFIG_X86_64)
153 #define _LO32 "k" /* force 32-bit operand */
154 #define _STK "%%rsp" /* stack pointer */
155 #elif defined(__i386__)
156 #define _LO32 "" /* force 32-bit operand */
157 #define _STK "%%esp" /* stack pointer */
158 #endif
159
160 /*
161 * These EFLAGS bits are restored from saved value during emulation, and
162 * any changes are written back to the saved value after emulation.
163 */
164 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
165
166 /* Before executing instruction: restore necessary bits in EFLAGS. */
167 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
168 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
169 "movl %"_sav",%"_LO32 _tmp"; " \
170 "push %"_tmp"; " \
171 "push %"_tmp"; " \
172 "movl %"_msk",%"_LO32 _tmp"; " \
173 "andl %"_LO32 _tmp",("_STK"); " \
174 "pushf; " \
175 "notl %"_LO32 _tmp"; " \
176 "andl %"_LO32 _tmp",("_STK"); " \
177 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
178 "pop %"_tmp"; " \
179 "orl %"_LO32 _tmp",("_STK"); " \
180 "popf; " \
181 "pop %"_sav"; "
182
183 /* After executing instruction: write-back necessary bits in EFLAGS. */
184 #define _POST_EFLAGS(_sav, _msk, _tmp) \
185 /* _sav |= EFLAGS & _msk; */ \
186 "pushf; " \
187 "pop %"_tmp"; " \
188 "andl %"_msk",%"_LO32 _tmp"; " \
189 "orl %"_LO32 _tmp",%"_sav"; "
190
191 #ifdef CONFIG_X86_64
192 #define ON64(x) x
193 #else
194 #define ON64(x)
195 #endif
196
197 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
198 do { \
199 __asm__ __volatile__ ( \
200 _PRE_EFLAGS("0", "4", "2") \
201 _op _suffix " %"_x"3,%1; " \
202 _POST_EFLAGS("0", "4", "2") \
203 : "=m" (_eflags), "=m" ((_dst).val), \
204 "=&r" (_tmp) \
205 : _y ((_src).val), "i" (EFLAGS_MASK)); \
206 } while (0)
207
208
209 /* Raw emulation: instruction has two explicit operands. */
210 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
211 do { \
212 unsigned long _tmp; \
213 \
214 switch ((_dst).bytes) { \
215 case 2: \
216 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
217 break; \
218 case 4: \
219 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
220 break; \
221 case 8: \
222 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
223 break; \
224 } \
225 } while (0)
226
227 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
228 do { \
229 unsigned long _tmp; \
230 switch ((_dst).bytes) { \
231 case 1: \
232 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
233 break; \
234 default: \
235 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
236 _wx, _wy, _lx, _ly, _qx, _qy); \
237 break; \
238 } \
239 } while (0)
240
241 /* Source operand is byte-sized and may be restricted to just %cl. */
242 #define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
243 __emulate_2op(_op, _src, _dst, _eflags, \
244 "b", "c", "b", "c", "b", "c", "b", "c")
245
246 /* Source operand is byte, word, long or quad sized. */
247 #define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
248 __emulate_2op(_op, _src, _dst, _eflags, \
249 "b", "q", "w", "r", _LO32, "r", "", "r")
250
251 /* Source operand is word, long or quad sized. */
252 #define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
253 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
254 "w", "r", _LO32, "r", "", "r")
255
256 /* Instruction has three operands and one operand is stored in ECX register */
257 #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
258 do { \
259 unsigned long _tmp; \
260 _type _clv = (_cl).val; \
261 _type _srcv = (_src).val; \
262 _type _dstv = (_dst).val; \
263 \
264 __asm__ __volatile__ ( \
265 _PRE_EFLAGS("0", "5", "2") \
266 _op _suffix " %4,%1 \n" \
267 _POST_EFLAGS("0", "5", "2") \
268 : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
269 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
270 ); \
271 \
272 (_cl).val = (unsigned long) _clv; \
273 (_src).val = (unsigned long) _srcv; \
274 (_dst).val = (unsigned long) _dstv; \
275 } while (0)
276
277 #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
278 do { \
279 switch ((_dst).bytes) { \
280 case 2: \
281 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
282 "w", unsigned short); \
283 break; \
284 case 4: \
285 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
286 "l", unsigned int); \
287 break; \
288 case 8: \
289 ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
290 "q", unsigned long)); \
291 break; \
292 } \
293 } while (0)
294
295 #define __emulate_1op(_op, _dst, _eflags, _suffix) \
296 do { \
297 unsigned long _tmp; \
298 \
299 __asm__ __volatile__ ( \
300 _PRE_EFLAGS("0", "3", "2") \
301 _op _suffix " %1; " \
302 _POST_EFLAGS("0", "3", "2") \
303 : "=m" (_eflags), "+m" ((_dst).val), \
304 "=&r" (_tmp) \
305 : "i" (EFLAGS_MASK)); \
306 } while (0)
307
308 /* Instruction has only one explicit operand (no source operand). */
309 #define emulate_1op(_op, _dst, _eflags) \
310 do { \
311 switch ((_dst).bytes) { \
312 case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
313 case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
314 case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
315 case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
316 } \
317 } while (0)
318
319 #define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix) \
320 do { \
321 unsigned long _tmp; \
322 \
323 __asm__ __volatile__ ( \
324 _PRE_EFLAGS("0", "4", "1") \
325 _op _suffix " %5; " \
326 _POST_EFLAGS("0", "4", "1") \
327 : "=m" (_eflags), "=&r" (_tmp), \
328 "+a" (_rax), "+d" (_rdx) \
329 : "i" (EFLAGS_MASK), "m" ((_src).val), \
330 "a" (_rax), "d" (_rdx)); \
331 } while (0)
332
333 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
334 #define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags) \
335 do { \
336 switch ((_src).bytes) { \
337 case 1: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "b"); break; \
338 case 2: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "w"); break; \
339 case 4: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "l"); break; \
340 case 8: ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "q")); break; \
341 } \
342 } while (0)
343
344 /* Fetch next part of the instruction being emulated. */
345 #define insn_fetch(_type, _size, _eip) \
346 ({ unsigned long _x; \
347 rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
348 if (rc != X86EMUL_CONTINUE) \
349 goto done; \
350 (_eip) += (_size); \
351 (_type)_x; \
352 })
353
354 #define insn_fetch_arr(_arr, _size, _eip) \
355 ({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
356 if (rc != X86EMUL_CONTINUE) \
357 goto done; \
358 (_eip) += (_size); \
359 })
360
361 static inline unsigned long ad_mask(struct decode_cache *c)
362 {
363 return (1UL << (c->ad_bytes << 3)) - 1;
364 }
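/*
 * For example, with a 16-bit address size (c->ad_bytes == 2) this is
 * (1UL << 16) - 1 == 0xffff, so only the low 16 bits of a register
 * take part in effective-address arithmetic.
 */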
365
366 /* Access/update address held in a register, based on addressing mode. */
367 static inline unsigned long
368 address_mask(struct decode_cache *c, unsigned long reg)
369 {
370 if (c->ad_bytes == sizeof(unsigned long))
371 return reg;
372 else
373 return reg & ad_mask(c);
374 }
375
376 static inline unsigned long
377 register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
378 {
379 return base + address_mask(c, reg);
380 }
381
382 static inline void
383 register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
384 {
385 if (c->ad_bytes == sizeof(unsigned long))
386 *reg += inc;
387 else
388 *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
389 }
390
391 static inline void jmp_rel(struct decode_cache *c, int rel)
392 {
393 register_address_increment(c, &c->eip, rel);
394 }
395
396 static void set_seg_override(struct decode_cache *c, int seg)
397 {
398 c->has_seg_override = true;
399 c->seg_override = seg;
400 }
401
402 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
403 struct x86_emulate_ops *ops, int seg)
404 {
405 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
406 return 0;
407
408 return ops->get_cached_segment_base(seg, ctxt->vcpu);
409 }
410
411 static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
412 struct x86_emulate_ops *ops,
413 struct decode_cache *c)
414 {
415 if (!c->has_seg_override)
416 return 0;
417
418 return seg_base(ctxt, ops, c->seg_override);
419 }
420
421 static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
422 struct x86_emulate_ops *ops)
423 {
424 return seg_base(ctxt, ops, VCPU_SREG_ES);
425 }
426
427 static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
428 struct x86_emulate_ops *ops)
429 {
430 return seg_base(ctxt, ops, VCPU_SREG_SS);
431 }
432
433 static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
434 u32 error, bool valid)
435 {
436 ctxt->exception = vec;
437 ctxt->error_code = error;
438 ctxt->error_code_valid = valid;
439 ctxt->restart = false;
440 }
441
442 static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
443 {
444 emulate_exception(ctxt, GP_VECTOR, err, true);
445 }
446
447 static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
448 int err)
449 {
450 ctxt->cr2 = addr;
451 emulate_exception(ctxt, PF_VECTOR, err, true);
452 }
453
454 static void emulate_ud(struct x86_emulate_ctxt *ctxt)
455 {
456 emulate_exception(ctxt, UD_VECTOR, 0, false);
457 }
458
459 static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
460 {
461 emulate_exception(ctxt, TS_VECTOR, err, true);
462 }
463
464 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
465 struct x86_emulate_ops *ops,
466 unsigned long eip, u8 *dest)
467 {
468 struct fetch_cache *fc = &ctxt->decode.fetch;
469 int rc;
470 int size, cur_size;
471
472 if (eip == fc->end) {
473 cur_size = fc->end - fc->start;
474 size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
475 rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
476 size, ctxt->vcpu, NULL);
477 if (rc != X86EMUL_CONTINUE)
478 return rc;
479 fc->end += size;
480 }
481 *dest = fc->data[eip - fc->start];
482 return X86EMUL_CONTINUE;
483 }
484
485 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
486 struct x86_emulate_ops *ops,
487 unsigned long eip, void *dest, unsigned size)
488 {
489 int rc;
490
491 /* x86 instructions are limited to 15 bytes. */
492 if (eip + size - ctxt->eip > 15)
493 return X86EMUL_UNHANDLEABLE;
494 while (size--) {
495 rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
496 if (rc != X86EMUL_CONTINUE)
497 return rc;
498 }
499 return X86EMUL_CONTINUE;
500 }
501
502 /*
503 * Given the 'reg' portion of a ModRM byte, and a register block, return a
504 * pointer into the block that addresses the relevant register.
505 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
506 */
507 static void *decode_register(u8 modrm_reg, unsigned long *regs,
508 int highbyte_regs)
509 {
510 void *p;
511
512 p = &regs[modrm_reg];
513 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
514 p = (unsigned char *)&regs[modrm_reg & 3] + 1;
515 return p;
516 }
517
518 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
519 struct x86_emulate_ops *ops,
520 ulong addr,
521 u16 *size, unsigned long *address, int op_bytes)
522 {
523 int rc;
524
525 if (op_bytes == 2)
526 op_bytes = 3;
527 *address = 0;
528 rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL);
529 if (rc != X86EMUL_CONTINUE)
530 return rc;
531 rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL);
532 return rc;
533 }
534
535 static int test_cc(unsigned int condition, unsigned int flags)
536 {
537 int rc = 0;
538
539 switch ((condition & 15) >> 1) {
540 case 0: /* o */
541 rc |= (flags & EFLG_OF);
542 break;
543 case 1: /* b/c/nae */
544 rc |= (flags & EFLG_CF);
545 break;
546 case 2: /* z/e */
547 rc |= (flags & EFLG_ZF);
548 break;
549 case 3: /* be/na */
550 rc |= (flags & (EFLG_CF|EFLG_ZF));
551 break;
552 case 4: /* s */
553 rc |= (flags & EFLG_SF);
554 break;
555 case 5: /* p/pe */
556 rc |= (flags & EFLG_PF);
557 break;
558 case 7: /* le/ng */
559 rc |= (flags & EFLG_ZF);
560 /* fall through */
561 case 6: /* l/nge */
562 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
563 break;
564 }
565
566 /* Odd condition identifiers (lsb == 1) have inverted sense. */
567 return (!!rc ^ (condition & 1));
568 }
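/*
 * Example: condition 0x4 ("jz"/"je") selects case 2 above and tests
 * ZF; condition 0x5 ("jnz"/"jne") performs the same test but has the
 * low bit set, so the result is inverted by the final xor.
 */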
569
570 static void fetch_register_operand(struct operand *op)
571 {
572 switch (op->bytes) {
573 case 1:
574 op->val = *(u8 *)op->addr.reg;
575 break;
576 case 2:
577 op->val = *(u16 *)op->addr.reg;
578 break;
579 case 4:
580 op->val = *(u32 *)op->addr.reg;
581 break;
582 case 8:
583 op->val = *(u64 *)op->addr.reg;
584 break;
585 }
586 }
587
588 static void decode_register_operand(struct operand *op,
589 struct decode_cache *c,
590 int inhibit_bytereg)
591 {
592 unsigned reg = c->modrm_reg;
593 int highbyte_regs = c->rex_prefix == 0;
594
595 if (!(c->d & ModRM))
596 reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
597 op->type = OP_REG;
598 if ((c->d & ByteOp) && !inhibit_bytereg) {
599 op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
600 op->bytes = 1;
601 } else {
602 op->addr.reg = decode_register(reg, c->regs, 0);
603 op->bytes = c->op_bytes;
604 }
605 fetch_register_operand(op);
606 op->orig_val = op->val;
607 }
608
609 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
610 struct x86_emulate_ops *ops,
611 struct operand *op)
612 {
613 struct decode_cache *c = &ctxt->decode;
614 u8 sib;
615 int index_reg = 0, base_reg = 0, scale;
616 int rc = X86EMUL_CONTINUE;
617 ulong modrm_ea = 0;
618
619 if (c->rex_prefix) {
620 c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
621 index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
622 c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
623 }
624
625 c->modrm = insn_fetch(u8, 1, c->eip);
626 c->modrm_mod |= (c->modrm & 0xc0) >> 6;
627 c->modrm_reg |= (c->modrm & 0x38) >> 3;
628 c->modrm_rm |= (c->modrm & 0x07);
629 c->modrm_seg = VCPU_SREG_DS;
630
631 if (c->modrm_mod == 3) {
632 op->type = OP_REG;
633 op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
634 op->addr.reg = decode_register(c->modrm_rm,
635 c->regs, c->d & ByteOp);
636 fetch_register_operand(op);
637 return rc;
638 }
639
640 op->type = OP_MEM;
641
642 if (c->ad_bytes == 2) {
643 unsigned bx = c->regs[VCPU_REGS_RBX];
644 unsigned bp = c->regs[VCPU_REGS_RBP];
645 unsigned si = c->regs[VCPU_REGS_RSI];
646 unsigned di = c->regs[VCPU_REGS_RDI];
647
648 /* 16-bit ModR/M decode. */
649 switch (c->modrm_mod) {
650 case 0:
651 if (c->modrm_rm == 6)
652 modrm_ea += insn_fetch(u16, 2, c->eip);
653 break;
654 case 1:
655 modrm_ea += insn_fetch(s8, 1, c->eip);
656 break;
657 case 2:
658 modrm_ea += insn_fetch(u16, 2, c->eip);
659 break;
660 }
661 switch (c->modrm_rm) {
662 case 0:
663 modrm_ea += bx + si;
664 break;
665 case 1:
666 modrm_ea += bx + di;
667 break;
668 case 2:
669 modrm_ea += bp + si;
670 break;
671 case 3:
672 modrm_ea += bp + di;
673 break;
674 case 4:
675 modrm_ea += si;
676 break;
677 case 5:
678 modrm_ea += di;
679 break;
680 case 6:
681 if (c->modrm_mod != 0)
682 modrm_ea += bp;
683 break;
684 case 7:
685 modrm_ea += bx;
686 break;
687 }
688 if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
689 (c->modrm_rm == 6 && c->modrm_mod != 0))
690 c->modrm_seg = VCPU_SREG_SS;
691 modrm_ea = (u16)modrm_ea;
692 } else {
693 /* 32/64-bit ModR/M decode. */
694 if ((c->modrm_rm & 7) == 4) {
695 sib = insn_fetch(u8, 1, c->eip);
696 index_reg |= (sib >> 3) & 7;
697 base_reg |= sib & 7;
698 scale = sib >> 6;
699
700 if ((base_reg & 7) == 5 && c->modrm_mod == 0)
701 modrm_ea += insn_fetch(s32, 4, c->eip);
702 else
703 modrm_ea += c->regs[base_reg];
704 if (index_reg != 4)
705 modrm_ea += c->regs[index_reg] << scale;
706 } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
707 if (ctxt->mode == X86EMUL_MODE_PROT64)
708 c->rip_relative = 1;
709 } else
710 modrm_ea += c->regs[c->modrm_rm];
711 switch (c->modrm_mod) {
712 case 0:
713 if (c->modrm_rm == 5)
714 modrm_ea += insn_fetch(s32, 4, c->eip);
715 break;
716 case 1:
717 modrm_ea += insn_fetch(s8, 1, c->eip);
718 break;
719 case 2:
720 modrm_ea += insn_fetch(s32, 4, c->eip);
721 break;
722 }
723 }
724 op->addr.mem = modrm_ea;
725 done:
726 return rc;
727 }
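/*
 * Worked 16-bit example: modrm == 0x52 gives mod == 1, reg == 2,
 * rm == 2, so the effective address computed above is BP + SI + disp8
 * and, because BP is involved, the default segment becomes SS.
 */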
728
729 static int decode_abs(struct x86_emulate_ctxt *ctxt,
730 struct x86_emulate_ops *ops,
731 struct operand *op)
732 {
733 struct decode_cache *c = &ctxt->decode;
734 int rc = X86EMUL_CONTINUE;
735
736 op->type = OP_MEM;
737 switch (c->ad_bytes) {
738 case 2:
739 op->addr.mem = insn_fetch(u16, 2, c->eip);
740 break;
741 case 4:
742 op->addr.mem = insn_fetch(u32, 4, c->eip);
743 break;
744 case 8:
745 op->addr.mem = insn_fetch(u64, 8, c->eip);
746 break;
747 }
748 done:
749 return rc;
750 }
751
752 static void fetch_bit_operand(struct decode_cache *c)
753 {
754 long sv, mask;
755
756 if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
757 mask = ~(c->dst.bytes * 8 - 1);
758
759 if (c->src.bytes == 2)
760 sv = (s16)c->src.val & (s16)mask;
761 else if (c->src.bytes == 4)
762 sv = (s32)c->src.val & (s32)mask;
763
764 c->dst.addr.mem += (sv >> 3);
765 }
766
767 /* only subword offset */
768 c->src.val &= (c->dst.bytes << 3) - 1;
769 }
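/*
 * Worked example for a 32-bit "bt mem, reg" with a source value of 35:
 * mask is ~31, so sv == 32 and the memory operand advances by
 * 32 >> 3 == 4 bytes, while the in-word bit offset becomes
 * 35 & 31 == 3.
 */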
770
771 static int read_emulated(struct x86_emulate_ctxt *ctxt,
772 struct x86_emulate_ops *ops,
773 unsigned long addr, void *dest, unsigned size)
774 {
775 int rc;
776 struct read_cache *mc = &ctxt->decode.mem_read;
777 u32 err;
778
779 while (size) {
780 int n = min(size, 8u);
781 size -= n;
782 if (mc->pos < mc->end)
783 goto read_cached;
784
785 rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
786 ctxt->vcpu);
787 if (rc == X86EMUL_PROPAGATE_FAULT)
788 emulate_pf(ctxt, addr, err);
789 if (rc != X86EMUL_CONTINUE)
790 return rc;
791 mc->end += n;
792
793 read_cached:
794 memcpy(dest, mc->data + mc->pos, n);
795 mc->pos += n;
796 dest += n;
797 addr += n;
798 }
799 return X86EMUL_CONTINUE;
800 }
801
802 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
803 struct x86_emulate_ops *ops,
804 unsigned int size, unsigned short port,
805 void *dest)
806 {
807 struct read_cache *rc = &ctxt->decode.io_read;
808
809 if (rc->pos == rc->end) { /* refill pio read ahead */
810 struct decode_cache *c = &ctxt->decode;
811 unsigned int in_page, n;
812 unsigned int count = c->rep_prefix ?
813 address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
814 in_page = (ctxt->eflags & EFLG_DF) ?
815 offset_in_page(c->regs[VCPU_REGS_RDI]) :
816 PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
817 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
818 count);
819 if (n == 0)
820 n = 1;
821 rc->pos = rc->end = 0;
822 if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
823 return 0;
824 rc->end = n * size;
825 }
826
827 memcpy(dest, rc->data + rc->pos, size);
828 rc->pos += size;
829 return 1;
830 }
831
832 static u32 desc_limit_scaled(struct desc_struct *desc)
833 {
834 u32 limit = get_desc_limit(desc);
835
836 return desc->g ? (limit << 12) | 0xfff : limit;
837 }
838
839 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
840 struct x86_emulate_ops *ops,
841 u16 selector, struct desc_ptr *dt)
842 {
843 if (selector & (1 << 2)) {
844 struct desc_struct desc;
845 memset(dt, 0, sizeof *dt);
846 if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
847 return;
848
849 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
850 dt->address = get_desc_base(&desc);
851 } else
852 ops->get_gdt(dt, ctxt->vcpu);
853 }
854
855 /* allowed just for 8-byte segments */
856 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
857 struct x86_emulate_ops *ops,
858 u16 selector, struct desc_struct *desc)
859 {
860 struct desc_ptr dt;
861 u16 index = selector >> 3;
862 int ret;
863 u32 err;
864 ulong addr;
865
866 get_descriptor_table_ptr(ctxt, ops, selector, &dt);
867
868 if (dt.size < index * 8 + 7) {
869 emulate_gp(ctxt, selector & 0xfffc);
870 return X86EMUL_PROPAGATE_FAULT;
871 }
872 addr = dt.address + index * 8;
873 ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
874 if (ret == X86EMUL_PROPAGATE_FAULT)
875 emulate_pf(ctxt, addr, err);
876
877 return ret;
878 }
879
880 /* allowed just for 8-byte segments */
881 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
882 struct x86_emulate_ops *ops,
883 u16 selector, struct desc_struct *desc)
884 {
885 struct desc_ptr dt;
886 u16 index = selector >> 3;
887 u32 err;
888 ulong addr;
889 int ret;
890
891 get_descriptor_table_ptr(ctxt, ops, selector, &dt);
892
893 if (dt.size < index * 8 + 7) {
894 emulate_gp(ctxt, selector & 0xfffc);
895 return X86EMUL_PROPAGATE_FAULT;
896 }
897
898 addr = dt.address + index * 8;
899 ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
900 if (ret == X86EMUL_PROPAGATE_FAULT)
901 emulate_pf(ctxt, addr, err);
902
903 return ret;
904 }
905
906 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
907 struct x86_emulate_ops *ops,
908 u16 selector, int seg)
909 {
910 struct desc_struct seg_desc;
911 u8 dpl, rpl, cpl;
912 unsigned err_vec = GP_VECTOR;
913 u32 err_code = 0;
914 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
915 int ret;
916
917 memset(&seg_desc, 0, sizeof seg_desc);
918
919 if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
920 || ctxt->mode == X86EMUL_MODE_REAL) {
921 /* set real mode segment descriptor */
922 set_desc_base(&seg_desc, selector << 4);
923 set_desc_limit(&seg_desc, 0xffff);
924 seg_desc.type = 3;
925 seg_desc.p = 1;
926 seg_desc.s = 1;
927 goto load;
928 }
929
930 /* NULL selector is not valid for TR, CS and SS */
931 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
932 && null_selector)
933 goto exception;
934
935 /* TR should be in GDT only */
936 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
937 goto exception;
938
939 if (null_selector) /* for NULL selector skip all following checks */
940 goto load;
941
942 ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
943 if (ret != X86EMUL_CONTINUE)
944 return ret;
945
946 err_code = selector & 0xfffc;
947 err_vec = GP_VECTOR;
948
949 /* can't load system descriptor into segment selector */
950 if (seg <= VCPU_SREG_GS && !seg_desc.s)
951 goto exception;
952
953 if (!seg_desc.p) {
954 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
955 goto exception;
956 }
957
958 rpl = selector & 3;
959 dpl = seg_desc.dpl;
960 cpl = ops->cpl(ctxt->vcpu);
961
962 switch (seg) {
963 case VCPU_SREG_SS:
964 /*
965 * segment is not a writable data segment or segment
966 * selector's RPL != CPL or DPL != CPL
967 */
968 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
969 goto exception;
970 break;
971 case VCPU_SREG_CS:
972 if (!(seg_desc.type & 8))
973 goto exception;
974
975 if (seg_desc.type & 4) {
976 /* conforming */
977 if (dpl > cpl)
978 goto exception;
979 } else {
980 /* nonconforming */
981 if (rpl > cpl || dpl != cpl)
982 goto exception;
983 }
984 /* CS(RPL) <- CPL */
985 selector = (selector & 0xfffc) | cpl;
986 break;
987 case VCPU_SREG_TR:
988 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
989 goto exception;
990 break;
991 case VCPU_SREG_LDTR:
992 if (seg_desc.s || seg_desc.type != 2)
993 goto exception;
994 break;
995 default: /* DS, ES, FS, or GS */
996 /*
997 * segment is not a data or readable code segment or
998 * ((segment is a data or nonconforming code segment)
999 * and (both RPL and CPL > DPL))
1000 */
1001 if ((seg_desc.type & 0xa) == 0x8 ||
1002 (((seg_desc.type & 0xc) != 0xc) &&
1003 (rpl > dpl && cpl > dpl)))
1004 goto exception;
1005 break;
1006 }
1007
1008 if (seg_desc.s) {
1009 /* mark segment as accessed */
1010 seg_desc.type |= 1;
1011 ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
1012 if (ret != X86EMUL_CONTINUE)
1013 return ret;
1014 }
1015 load:
1016 ops->set_segment_selector(selector, seg, ctxt->vcpu);
1017 ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
1018 return X86EMUL_CONTINUE;
1019 exception:
1020 emulate_exception(ctxt, err_vec, err_code, true);
1021 return X86EMUL_PROPAGATE_FAULT;
1022 }
1023
1024 static void write_register_operand(struct operand *op)
1025 {
1026 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1027 switch (op->bytes) {
1028 case 1:
1029 *(u8 *)op->addr.reg = (u8)op->val;
1030 break;
1031 case 2:
1032 *(u16 *)op->addr.reg = (u16)op->val;
1033 break;
1034 case 4:
1035 *op->addr.reg = (u32)op->val;
1036 break; /* 64b: zero-extend */
1037 case 8:
1038 *op->addr.reg = op->val;
1039 break;
1040 }
1041 }
1042
1043 static inline int writeback(struct x86_emulate_ctxt *ctxt,
1044 struct x86_emulate_ops *ops)
1045 {
1046 int rc;
1047 struct decode_cache *c = &ctxt->decode;
1048 u32 err;
1049
1050 switch (c->dst.type) {
1051 case OP_REG:
1052 write_register_operand(&c->dst);
1053 break;
1054 case OP_MEM:
1055 if (c->lock_prefix)
1056 rc = ops->cmpxchg_emulated(
1057 c->dst.addr.mem,
1058 &c->dst.orig_val,
1059 &c->dst.val,
1060 c->dst.bytes,
1061 &err,
1062 ctxt->vcpu);
1063 else
1064 rc = ops->write_emulated(
1065 c->dst.addr.mem,
1066 &c->dst.val,
1067 c->dst.bytes,
1068 &err,
1069 ctxt->vcpu);
1070 if (rc == X86EMUL_PROPAGATE_FAULT)
1071 emulate_pf(ctxt, c->dst.addr.mem, err);
1072 if (rc != X86EMUL_CONTINUE)
1073 return rc;
1074 break;
1075 case OP_NONE:
1076 /* no writeback */
1077 break;
1078 default:
1079 break;
1080 }
1081 return X86EMUL_CONTINUE;
1082 }
1083
1084 static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
1085 struct x86_emulate_ops *ops)
1086 {
1087 struct decode_cache *c = &ctxt->decode;
1088
1089 c->dst.type = OP_MEM;
1090 c->dst.bytes = c->op_bytes;
1091 c->dst.val = c->src.val;
1092 register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
1093 c->dst.addr.mem = register_address(c, ss_base(ctxt, ops),
1094 c->regs[VCPU_REGS_RSP]);
1095 }
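/*
 * Note that emulate_push() only queues the store: it decrements RSP
 * and points c->dst at SS:RSP; the actual memory write happens when
 * the caller invokes writeback(), as emulate_pusha() and
 * emulate_int_real() below do after each push.
 */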
1096
1097 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1098 struct x86_emulate_ops *ops,
1099 void *dest, int len)
1100 {
1101 struct decode_cache *c = &ctxt->decode;
1102 int rc;
1103
1104 rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
1105 c->regs[VCPU_REGS_RSP]),
1106 dest, len);
1107 if (rc != X86EMUL_CONTINUE)
1108 return rc;
1109
1110 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
1111 return rc;
1112 }
1113
1114 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1115 struct x86_emulate_ops *ops,
1116 void *dest, int len)
1117 {
1118 int rc;
1119 unsigned long val, change_mask;
1120 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1121 int cpl = ops->cpl(ctxt->vcpu);
1122
1123 rc = emulate_pop(ctxt, ops, &val, len);
1124 if (rc != X86EMUL_CONTINUE)
1125 return rc;
1126
1127 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1128 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1129
1130 switch (ctxt->mode) {
1131 case X86EMUL_MODE_PROT64:
1132 case X86EMUL_MODE_PROT32:
1133 case X86EMUL_MODE_PROT16:
1134 if (cpl == 0)
1135 change_mask |= EFLG_IOPL;
1136 if (cpl <= iopl)
1137 change_mask |= EFLG_IF;
1138 break;
1139 case X86EMUL_MODE_VM86:
1140 if (iopl < 3) {
1141 emulate_gp(ctxt, 0);
1142 return X86EMUL_PROPAGATE_FAULT;
1143 }
1144 change_mask |= EFLG_IF;
1145 break;
1146 default: /* real mode */
1147 change_mask |= (EFLG_IOPL | EFLG_IF);
1148 break;
1149 }
1150
1151 *(unsigned long *)dest =
1152 (ctxt->eflags & ~change_mask) | (val & change_mask);
1153
1154 return rc;
1155 }
1156
1157 static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
1158 struct x86_emulate_ops *ops, int seg)
1159 {
1160 struct decode_cache *c = &ctxt->decode;
1161
1162 c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);
1163
1164 emulate_push(ctxt, ops);
1165 }
1166
1167 static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
1168 struct x86_emulate_ops *ops, int seg)
1169 {
1170 struct decode_cache *c = &ctxt->decode;
1171 unsigned long selector;
1172 int rc;
1173
1174 rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
1175 if (rc != X86EMUL_CONTINUE)
1176 return rc;
1177
1178 rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
1179 return rc;
1180 }
1181
1182 static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
1183 struct x86_emulate_ops *ops)
1184 {
1185 struct decode_cache *c = &ctxt->decode;
1186 unsigned long old_esp = c->regs[VCPU_REGS_RSP];
1187 int rc = X86EMUL_CONTINUE;
1188 int reg = VCPU_REGS_RAX;
1189
1190 while (reg <= VCPU_REGS_RDI) {
1191 c->src.val = (reg == VCPU_REGS_RSP) ?
1192 old_esp : c->regs[reg];
1193
1194 emulate_push(ctxt, ops);
1195
1196 rc = writeback(ctxt, ops);
1197 if (rc != X86EMUL_CONTINUE)
1198 return rc;
1199
1200 ++reg;
1201 }
1202
1203 /* Disable writeback. */
1204 c->dst.type = OP_NONE;
1205
1206 return rc;
1207 }
1208
1209 static int emulate_popa(struct x86_emulate_ctxt *ctxt,
1210 struct x86_emulate_ops *ops)
1211 {
1212 struct decode_cache *c = &ctxt->decode;
1213 int rc = X86EMUL_CONTINUE;
1214 int reg = VCPU_REGS_RDI;
1215
1216 while (reg >= VCPU_REGS_RAX) {
1217 if (reg == VCPU_REGS_RSP) {
1218 register_address_increment(c, &c->regs[VCPU_REGS_RSP],
1219 c->op_bytes);
1220 --reg;
1221 }
1222
1223 rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
1224 if (rc != X86EMUL_CONTINUE)
1225 break;
1226 --reg;
1227 }
1228 return rc;
1229 }
1230
1231 int emulate_int_real(struct x86_emulate_ctxt *ctxt,
1232 struct x86_emulate_ops *ops, int irq)
1233 {
1234 struct decode_cache *c = &ctxt->decode;
1235 int rc;
1236 struct desc_ptr dt;
1237 gva_t cs_addr;
1238 gva_t eip_addr;
1239 u16 cs, eip;
1240 u32 err;
1241
1242 /* TODO: Add limit checks */
1243 c->src.val = ctxt->eflags;
1244 emulate_push(ctxt, ops);
1245 rc = writeback(ctxt, ops);
1246 if (rc != X86EMUL_CONTINUE)
1247 return rc;
1248
1249 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1250
1251 c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
1252 emulate_push(ctxt, ops);
1253 rc = writeback(ctxt, ops);
1254 if (rc != X86EMUL_CONTINUE)
1255 return rc;
1256
1257 c->src.val = c->eip;
1258 emulate_push(ctxt, ops);
1259 rc = writeback(ctxt, ops);
1260 if (rc != X86EMUL_CONTINUE)
1261 return rc;
1262
1263 c->dst.type = OP_NONE;
1264
1265 ops->get_idt(&dt, ctxt->vcpu);
1266
1267 eip_addr = dt.address + (irq << 2);
1268 cs_addr = dt.address + (irq << 2) + 2;
1269
1270 rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &err);
1271 if (rc != X86EMUL_CONTINUE)
1272 return rc;
1273
1274 rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &err);
1275 if (rc != X86EMUL_CONTINUE)
1276 return rc;
1277
1278 rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
1279 if (rc != X86EMUL_CONTINUE)
1280 return rc;
1281
1282 c->eip = eip;
1283
1284 return rc;
1285 }
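/*
 * Real-mode IVT layout, as used above: each vector is 4 bytes, with
 * IP at idt.address + irq * 4 and CS at idt.address + irq * 4 + 2.
 * E.g. for "int $0x10" the new IP is read from offset 0x40 and the
 * new CS from offset 0x42.
 */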
1286
1287 static int emulate_int(struct x86_emulate_ctxt *ctxt,
1288 struct x86_emulate_ops *ops, int irq)
1289 {
1290 switch (ctxt->mode) {
1291 case X86EMUL_MODE_REAL:
1292 return emulate_int_real(ctxt, ops, irq);
1293 case X86EMUL_MODE_VM86:
1294 case X86EMUL_MODE_PROT16:
1295 case X86EMUL_MODE_PROT32:
1296 case X86EMUL_MODE_PROT64:
1297 default:
1298 /* Protected-mode interrupts are not yet implemented */
1299 return X86EMUL_UNHANDLEABLE;
1300 }
1301 }
1302
1303 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
1304 struct x86_emulate_ops *ops)
1305 {
1306 struct decode_cache *c = &ctxt->decode;
1307 int rc = X86EMUL_CONTINUE;
1308 unsigned long temp_eip = 0;
1309 unsigned long temp_eflags = 0;
1310 unsigned long cs = 0;
1311 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1312 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1313 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1314 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1315
1316 /* TODO: Add stack limit check */
1317
1318 rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
1319
1320 if (rc != X86EMUL_CONTINUE)
1321 return rc;
1322
1323 if (temp_eip & ~0xffff) {
1324 emulate_gp(ctxt, 0);
1325 return X86EMUL_PROPAGATE_FAULT;
1326 }
1327
1328 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1329
1330 if (rc != X86EMUL_CONTINUE)
1331 return rc;
1332
1333 rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
1334
1335 if (rc != X86EMUL_CONTINUE)
1336 return rc;
1337
1338 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1339
1340 if (rc != X86EMUL_CONTINUE)
1341 return rc;
1342
1343 c->eip = temp_eip;
1344
1345
1346 if (c->op_bytes == 4)
1347 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1348 else if (c->op_bytes == 2) {
1349 ctxt->eflags &= ~0xffff;
1350 ctxt->eflags |= temp_eflags;
1351 }
1352
1353 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1354 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1355
1356 return rc;
1357 }
1358
1359 static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
1360 struct x86_emulate_ops *ops)
1361 {
1362 switch (ctxt->mode) {
1363 case X86EMUL_MODE_REAL:
1364 return emulate_iret_real(ctxt, ops);
1365 case X86EMUL_MODE_VM86:
1366 case X86EMUL_MODE_PROT16:
1367 case X86EMUL_MODE_PROT32:
1368 case X86EMUL_MODE_PROT64:
1369 default:
1370 /* iret from protected mode is not yet implemented */
1371 return X86EMUL_UNHANDLEABLE;
1372 }
1373 }
1374
1375 static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
1376 struct x86_emulate_ops *ops)
1377 {
1378 struct decode_cache *c = &ctxt->decode;
1379
1380 return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
1381 }
1382
1383 static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
1384 {
1385 struct decode_cache *c = &ctxt->decode;
1386 switch (c->modrm_reg) {
1387 case 0: /* rol */
1388 emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
1389 break;
1390 case 1: /* ror */
1391 emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
1392 break;
1393 case 2: /* rcl */
1394 emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
1395 break;
1396 case 3: /* rcr */
1397 emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
1398 break;
1399 case 4: /* sal/shl */
1400 case 6: /* sal/shl */
1401 emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
1402 break;
1403 case 5: /* shr */
1404 emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
1405 break;
1406 case 7: /* sar */
1407 emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
1408 break;
1409 }
1410 }
1411
1412 static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
1413 struct x86_emulate_ops *ops)
1414 {
1415 struct decode_cache *c = &ctxt->decode;
1416 unsigned long *rax = &c->regs[VCPU_REGS_RAX];
1417 unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
1418
1419 switch (c->modrm_reg) {
1420 case 0 ... 1: /* test */
1421 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
1422 break;
1423 case 2: /* not */
1424 c->dst.val = ~c->dst.val;
1425 break;
1426 case 3: /* neg */
1427 emulate_1op("neg", c->dst, ctxt->eflags);
1428 break;
1429 case 4: /* mul */
1430 emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
1431 break;
1432 case 5: /* imul */
1433 emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
1434 break;
1435 case 6: /* div */
1436 emulate_1op_rax_rdx("div", c->src, *rax, *rdx, ctxt->eflags);
1437 break;
1438 case 7: /* idiv */
1439 emulate_1op_rax_rdx("idiv", c->src, *rax, *rdx, ctxt->eflags);
1440 break;
1441 default:
1442 return X86EMUL_UNHANDLEABLE;
1443 }
1444 return X86EMUL_CONTINUE;
1445 }
1446
1447 static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
1448 struct x86_emulate_ops *ops)
1449 {
1450 struct decode_cache *c = &ctxt->decode;
1451
1452 switch (c->modrm_reg) {
1453 case 0: /* inc */
1454 emulate_1op("inc", c->dst, ctxt->eflags);
1455 break;
1456 case 1: /* dec */
1457 emulate_1op("dec", c->dst, ctxt->eflags);
1458 break;
1459 case 2: /* call near abs */ {
1460 long int old_eip;
1461 old_eip = c->eip;
1462 c->eip = c->src.val;
1463 c->src.val = old_eip;
1464 emulate_push(ctxt, ops);
1465 break;
1466 }
1467 case 4: /* jmp abs */
1468 c->eip = c->src.val;
1469 break;
1470 case 6: /* push */
1471 emulate_push(ctxt, ops);
1472 break;
1473 }
1474 return X86EMUL_CONTINUE;
1475 }
1476
1477 static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1478 struct x86_emulate_ops *ops)
1479 {
1480 struct decode_cache *c = &ctxt->decode;
1481 u64 old = c->dst.orig_val64;
1482
1483 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
1484 ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
1485 c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1486 c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1487 ctxt->eflags &= ~EFLG_ZF;
1488 } else {
1489 c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
1490 (u32) c->regs[VCPU_REGS_RBX];
1491
1492 ctxt->eflags |= EFLG_ZF;
1493 }
1494 return X86EMUL_CONTINUE;
1495 }
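/*
 * This implements cmpxchg8b semantics: if the old 64-bit memory value
 * equals EDX:EAX, ZF is set and ECX:EBX is written back via c->dst;
 * otherwise ZF is cleared and the memory value is loaded into EDX:EAX.
 */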
1496
1497 static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
1498 struct x86_emulate_ops *ops)
1499 {
1500 struct decode_cache *c = &ctxt->decode;
1501 int rc;
1502 unsigned long cs;
1503
1504 rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
1505 if (rc != X86EMUL_CONTINUE)
1506 return rc;
1507 if (c->op_bytes == 4)
1508 c->eip = (u32)c->eip;
1509 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1510 if (rc != X86EMUL_CONTINUE)
1511 return rc;
1512 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1513 return rc;
1514 }
1515
1516 static inline void
1517 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1518 struct x86_emulate_ops *ops, struct desc_struct *cs,
1519 struct desc_struct *ss)
1520 {
1521 memset(cs, 0, sizeof(struct desc_struct));
1522 ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
1523 memset(ss, 0, sizeof(struct desc_struct));
1524
1525 cs->l = 0; /* will be adjusted later */
1526 set_desc_base(cs, 0); /* flat segment */
1527 cs->g = 1; /* 4kb granularity */
1528 set_desc_limit(cs, 0xfffff); /* 4GB limit */
1529 cs->type = 0x0b; /* Read, Execute, Accessed */
1530 cs->s = 1;
1531 cs->dpl = 0; /* will be adjusted later */
1532 cs->p = 1;
1533 cs->d = 1;
1534
1535 set_desc_base(ss, 0); /* flat segment */
1536 set_desc_limit(ss, 0xfffff); /* 4GB limit */
1537 ss->g = 1; /* 4kb granularity */
1538 ss->s = 1;
1539 ss->type = 0x03; /* Read/Write, Accessed */
1540 ss->d = 1; /* 32bit stack segment */
1541 ss->dpl = 0;
1542 ss->p = 1;
1543 }
1544
1545 static int
1546 emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1547 {
1548 struct decode_cache *c = &ctxt->decode;
1549 struct desc_struct cs, ss;
1550 u64 msr_data;
1551 u16 cs_sel, ss_sel;
1552
1553 /* syscall is not available in real mode */
1554 if (ctxt->mode == X86EMUL_MODE_REAL ||
1555 ctxt->mode == X86EMUL_MODE_VM86) {
1556 emulate_ud(ctxt);
1557 return X86EMUL_PROPAGATE_FAULT;
1558 }
1559
1560 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1561
1562 ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1563 msr_data >>= 32;
1564 cs_sel = (u16)(msr_data & 0xfffc);
1565 ss_sel = (u16)(msr_data + 8);
1566
1567 if (is_long_mode(ctxt->vcpu)) {
1568 cs.d = 0;
1569 cs.l = 1;
1570 }
1571 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
1572 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
1573 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
1574 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
1575
1576 c->regs[VCPU_REGS_RCX] = c->eip;
1577 if (is_long_mode(ctxt->vcpu)) {
1578 #ifdef CONFIG_X86_64
1579 c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
1580
1581 ops->get_msr(ctxt->vcpu,
1582 ctxt->mode == X86EMUL_MODE_PROT64 ?
1583 MSR_LSTAR : MSR_CSTAR, &msr_data);
1584 c->eip = msr_data;
1585
1586 ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
1587 ctxt->eflags &= ~(msr_data | EFLG_RF);
1588 #endif
1589 } else {
1590 /* legacy mode */
1591 ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1592 c->eip = (u32)msr_data;
1593
1594 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1595 }
1596
1597 return X86EMUL_CONTINUE;
1598 }
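/*
 * As the code above shows, SYSCALL takes its selectors from MSR_STAR:
 * after the ">>= 32", bits 47:32 of the MSR provide the new CS (with
 * the RPL bits masked off) and SS is implicitly that value + 8.
 */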
1599
1600 static int
1601 emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1602 {
1603 struct decode_cache *c = &ctxt->decode;
1604 struct desc_struct cs, ss;
1605 u64 msr_data;
1606 u16 cs_sel, ss_sel;
1607
1608 /* inject #GP if in real mode */
1609 if (ctxt->mode == X86EMUL_MODE_REAL) {
1610 emulate_gp(ctxt, 0);
1611 return X86EMUL_PROPAGATE_FAULT;
1612 }
1613
1614 /* XXX sysenter/sysexit have not been tested in 64-bit mode.
1615 * Therefore, we inject an #UD.
1616 */
1617 if (ctxt->mode == X86EMUL_MODE_PROT64) {
1618 emulate_ud(ctxt);
1619 return X86EMUL_PROPAGATE_FAULT;
1620 }
1621
1622 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1623
1624 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
1625 switch (ctxt->mode) {
1626 case X86EMUL_MODE_PROT32:
1627 if ((msr_data & 0xfffc) == 0x0) {
1628 emulate_gp(ctxt, 0);
1629 return X86EMUL_PROPAGATE_FAULT;
1630 }
1631 break;
1632 case X86EMUL_MODE_PROT64:
1633 if (msr_data == 0x0) {
1634 emulate_gp(ctxt, 0);
1635 return X86EMUL_PROPAGATE_FAULT;
1636 }
1637 break;
1638 }
1639
1640 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1641 cs_sel = (u16)msr_data;
1642 cs_sel &= ~SELECTOR_RPL_MASK;
1643 ss_sel = cs_sel + 8;
1644 ss_sel &= ~SELECTOR_RPL_MASK;
1645 if (ctxt->mode == X86EMUL_MODE_PROT64
1646 || is_long_mode(ctxt->vcpu)) {
1647 cs.d = 0;
1648 cs.l = 1;
1649 }
1650
1651 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
1652 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
1653 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
1654 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
1655
1656 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
1657 c->eip = msr_data;
1658
1659 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
1660 c->regs[VCPU_REGS_RSP] = msr_data;
1661
1662 return X86EMUL_CONTINUE;
1663 }
1664
1665 static int
1666 emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1667 {
1668 struct decode_cache *c = &ctxt->decode;
1669 struct desc_struct cs, ss;
1670 u64 msr_data;
1671 int usermode;
1672 u16 cs_sel, ss_sel;
1673
1674 /* inject #GP if in real mode or Virtual 8086 mode */
1675 if (ctxt->mode == X86EMUL_MODE_REAL ||
1676 ctxt->mode == X86EMUL_MODE_VM86) {
1677 emulate_gp(ctxt, 0);
1678 return X86EMUL_PROPAGATE_FAULT;
1679 }
1680
1681 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1682
1683 if ((c->rex_prefix & 0x8) != 0x0)
1684 usermode = X86EMUL_MODE_PROT64;
1685 else
1686 usermode = X86EMUL_MODE_PROT32;
1687
1688 cs.dpl = 3;
1689 ss.dpl = 3;
1690 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
1691 switch (usermode) {
1692 case X86EMUL_MODE_PROT32:
1693 cs_sel = (u16)(msr_data + 16);
1694 if ((msr_data & 0xfffc) == 0x0) {
1695 emulate_gp(ctxt, 0);
1696 return X86EMUL_PROPAGATE_FAULT;
1697 }
1698 ss_sel = (u16)(msr_data + 24);
1699 break;
1700 case X86EMUL_MODE_PROT64:
1701 cs_sel = (u16)(msr_data + 32);
1702 if (msr_data == 0x0) {
1703 emulate_gp(ctxt, 0);
1704 return X86EMUL_PROPAGATE_FAULT;
1705 }
1706 ss_sel = cs_sel + 8;
1707 cs.d = 0;
1708 cs.l = 1;
1709 break;
1710 }
1711 cs_sel |= SELECTOR_RPL_MASK;
1712 ss_sel |= SELECTOR_RPL_MASK;
1713
1714 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
1715 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
1716 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
1717 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
1718
1719 c->eip = c->regs[VCPU_REGS_RDX];
1720 c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
1721
1722 return X86EMUL_CONTINUE;
1723 }
1724
1725 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
1726 struct x86_emulate_ops *ops)
1727 {
1728 int iopl;
1729 if (ctxt->mode == X86EMUL_MODE_REAL)
1730 return false;
1731 if (ctxt->mode == X86EMUL_MODE_VM86)
1732 return true;
1733 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1734 return ops->cpl(ctxt->vcpu) > iopl;
1735 }
1736
1737 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
1738 struct x86_emulate_ops *ops,
1739 u16 port, u16 len)
1740 {
1741 struct desc_struct tr_seg;
1742 int r;
1743 u16 io_bitmap_ptr;
1744 u8 perm, bit_idx = port & 0x7;
1745 unsigned mask = (1 << len) - 1;
1746
1747 ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
1748 if (!tr_seg.p)
1749 return false;
1750 if (desc_limit_scaled(&tr_seg) < 103)
1751 return false;
1752 r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
1753 ctxt->vcpu, NULL);
1754 if (r != X86EMUL_CONTINUE)
1755 return false;
1756 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
1757 return false;
1758 r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
1759 &perm, 1, ctxt->vcpu, NULL);
1760 if (r != X86EMUL_CONTINUE)
1761 return false;
1762 if ((perm >> bit_idx) & mask)
1763 return false;
1764 return true;
1765 }
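/*
 * Worked example: a 1-byte access to port 0x3f8 reads the permission
 * byte at io_bitmap_ptr + 0x3f8 / 8 == io_bitmap_ptr + 0x7f and tests
 * bit 0x3f8 & 7 == 0; the access is allowed only if that bit is clear.
 */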
1766
1767 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
1768 struct x86_emulate_ops *ops,
1769 u16 port, u16 len)
1770 {
1771 if (ctxt->perm_ok)
1772 return true;
1773
1774 if (emulator_bad_iopl(ctxt, ops))
1775 if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
1776 return false;
1777
1778 ctxt->perm_ok = true;
1779
1780 return true;
1781 }
1782
1783 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
1784 struct x86_emulate_ops *ops,
1785 struct tss_segment_16 *tss)
1786 {
1787 struct decode_cache *c = &ctxt->decode;
1788
1789 tss->ip = c->eip;
1790 tss->flag = ctxt->eflags;
1791 tss->ax = c->regs[VCPU_REGS_RAX];
1792 tss->cx = c->regs[VCPU_REGS_RCX];
1793 tss->dx = c->regs[VCPU_REGS_RDX];
1794 tss->bx = c->regs[VCPU_REGS_RBX];
1795 tss->sp = c->regs[VCPU_REGS_RSP];
1796 tss->bp = c->regs[VCPU_REGS_RBP];
1797 tss->si = c->regs[VCPU_REGS_RSI];
1798 tss->di = c->regs[VCPU_REGS_RDI];
1799
1800 tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
1801 tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
1802 tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
1803 tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
1804 tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
1805 }
1806
1807 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
1808 struct x86_emulate_ops *ops,
1809 struct tss_segment_16 *tss)
1810 {
1811 struct decode_cache *c = &ctxt->decode;
1812 int ret;
1813
1814 c->eip = tss->ip;
1815 ctxt->eflags = tss->flag | 2;
1816 c->regs[VCPU_REGS_RAX] = tss->ax;
1817 c->regs[VCPU_REGS_RCX] = tss->cx;
1818 c->regs[VCPU_REGS_RDX] = tss->dx;
1819 c->regs[VCPU_REGS_RBX] = tss->bx;
1820 c->regs[VCPU_REGS_RSP] = tss->sp;
1821 c->regs[VCPU_REGS_RBP] = tss->bp;
1822 c->regs[VCPU_REGS_RSI] = tss->si;
1823 c->regs[VCPU_REGS_RDI] = tss->di;
1824
1825 /*
1826 * SDM says that segment selectors are loaded before segment
1827 * descriptors
1828 */
1829 ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
1830 ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
1831 ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
1832 ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
1833 ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
1834
1835 /*
1836 * Now load segment descriptors. If a fault happens at this stage,
1837 * it is handled in the context of the new task.
1838 */
1839 ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
1840 if (ret != X86EMUL_CONTINUE)
1841 return ret;
1842 ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
1843 if (ret != X86EMUL_CONTINUE)
1844 return ret;
1845 ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
1846 if (ret != X86EMUL_CONTINUE)
1847 return ret;
1848 ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
1849 if (ret != X86EMUL_CONTINUE)
1850 return ret;
1851 ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
1852 if (ret != X86EMUL_CONTINUE)
1853 return ret;
1854
1855 return X86EMUL_CONTINUE;
1856 }
1857
1858 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
1859 struct x86_emulate_ops *ops,
1860 u16 tss_selector, u16 old_tss_sel,
1861 ulong old_tss_base, struct desc_struct *new_desc)
1862 {
1863 struct tss_segment_16 tss_seg;
1864 int ret;
1865 u32 err, new_tss_base = get_desc_base(new_desc);
1866
1867 ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
1868 &err);
1869 if (ret == X86EMUL_PROPAGATE_FAULT) {
1870 /* FIXME: need to provide precise fault address */
1871 emulate_pf(ctxt, old_tss_base, err);
1872 return ret;
1873 }
1874
1875 save_state_to_tss16(ctxt, ops, &tss_seg);
1876
1877 ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
1878 &err);
1879 if (ret == X86EMUL_PROPAGATE_FAULT) {
1880 /* FIXME: need to provide precise fault address */
1881 emulate_pf(ctxt, old_tss_base, err);
1882 return ret;
1883 }
1884
1885 ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
1886 &err);
1887 if (ret == X86EMUL_PROPAGATE_FAULT) {
1888 /* FIXME: need to provide precise fault address */
1889 emulate_pf(ctxt, new_tss_base, err);
1890 return ret;
1891 }
1892
1893 if (old_tss_sel != 0xffff) {
1894 tss_seg.prev_task_link = old_tss_sel;
1895
1896 ret = ops->write_std(new_tss_base,
1897 &tss_seg.prev_task_link,
1898 sizeof tss_seg.prev_task_link,
1899 ctxt->vcpu, &err);
1900 if (ret == X86EMUL_PROPAGATE_FAULT) {
1901 /* FIXME: need to provide precise fault address */
1902 emulate_pf(ctxt, new_tss_base, err);
1903 return ret;
1904 }
1905 }
1906
1907 return load_state_from_tss16(ctxt, ops, &tss_seg);
1908 }
1909
1910 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
1911 struct x86_emulate_ops *ops,
1912 struct tss_segment_32 *tss)
1913 {
1914 struct decode_cache *c = &ctxt->decode;
1915
1916 tss->cr3 = ops->get_cr(3, ctxt->vcpu);
1917 tss->eip = c->eip;
1918 tss->eflags = ctxt->eflags;
1919 tss->eax = c->regs[VCPU_REGS_RAX];
1920 tss->ecx = c->regs[VCPU_REGS_RCX];
1921 tss->edx = c->regs[VCPU_REGS_RDX];
1922 tss->ebx = c->regs[VCPU_REGS_RBX];
1923 tss->esp = c->regs[VCPU_REGS_RSP];
1924 tss->ebp = c->regs[VCPU_REGS_RBP];
1925 tss->esi = c->regs[VCPU_REGS_RSI];
1926 tss->edi = c->regs[VCPU_REGS_RDI];
1927
1928 tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
1929 tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
1930 tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
1931 tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
1932 tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
1933 tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
1934 tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
1935 }
1936
1937 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
1938 struct x86_emulate_ops *ops,
1939 struct tss_segment_32 *tss)
1940 {
1941 struct decode_cache *c = &ctxt->decode;
1942 int ret;
1943
1944 if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
1945 emulate_gp(ctxt, 0);
1946 return X86EMUL_PROPAGATE_FAULT;
1947 }
1948 c->eip = tss->eip;
1949 ctxt->eflags = tss->eflags | 2;
1950 c->regs[VCPU_REGS_RAX] = tss->eax;
1951 c->regs[VCPU_REGS_RCX] = tss->ecx;
1952 c->regs[VCPU_REGS_RDX] = tss->edx;
1953 c->regs[VCPU_REGS_RBX] = tss->ebx;
1954 c->regs[VCPU_REGS_RSP] = tss->esp;
1955 c->regs[VCPU_REGS_RBP] = tss->ebp;
1956 c->regs[VCPU_REGS_RSI] = tss->esi;
1957 c->regs[VCPU_REGS_RDI] = tss->edi;
1958
1959 /*
1960 * SDM says that segment selectors are loaded before segment
1961 * descriptors
1962 */
1963 ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
1964 ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
1965 ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
1966 ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
1967 ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
1968 ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
1969 ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
1970
1971 	/*
1972 	 * Now load the segment descriptors. If a fault happens at this
1973 	 * stage, it is handled in the context of the new task.
1974 	 */
1975 ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
1976 if (ret != X86EMUL_CONTINUE)
1977 return ret;
1978 ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
1979 if (ret != X86EMUL_CONTINUE)
1980 return ret;
1981 ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
1982 if (ret != X86EMUL_CONTINUE)
1983 return ret;
1984 ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
1985 if (ret != X86EMUL_CONTINUE)
1986 return ret;
1987 ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
1988 if (ret != X86EMUL_CONTINUE)
1989 return ret;
1990 ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
1991 if (ret != X86EMUL_CONTINUE)
1992 return ret;
1993 ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
1994 if (ret != X86EMUL_CONTINUE)
1995 return ret;
1996
1997 return X86EMUL_CONTINUE;
1998 }
1999
2000 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2001 struct x86_emulate_ops *ops,
2002 u16 tss_selector, u16 old_tss_sel,
2003 ulong old_tss_base, struct desc_struct *new_desc)
2004 {
2005 struct tss_segment_32 tss_seg;
2006 int ret;
2007 u32 err, new_tss_base = get_desc_base(new_desc);
2008
2009 ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2010 &err);
2011 if (ret == X86EMUL_PROPAGATE_FAULT) {
2012 /* FIXME: need to provide precise fault address */
2013 emulate_pf(ctxt, old_tss_base, err);
2014 return ret;
2015 }
2016
2017 save_state_to_tss32(ctxt, ops, &tss_seg);
2018
2019 ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2020 &err);
2021 if (ret == X86EMUL_PROPAGATE_FAULT) {
2022 /* FIXME: need to provide precise fault address */
2023 emulate_pf(ctxt, old_tss_base, err);
2024 return ret;
2025 }
2026
2027 ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2028 &err);
2029 if (ret == X86EMUL_PROPAGATE_FAULT) {
2030 /* FIXME: need to provide precise fault address */
2031 emulate_pf(ctxt, new_tss_base, err);
2032 return ret;
2033 }
2034
2035 if (old_tss_sel != 0xffff) {
2036 tss_seg.prev_task_link = old_tss_sel;
2037
2038 ret = ops->write_std(new_tss_base,
2039 &tss_seg.prev_task_link,
2040 sizeof tss_seg.prev_task_link,
2041 ctxt->vcpu, &err);
2042 if (ret == X86EMUL_PROPAGATE_FAULT) {
2043 /* FIXME: need to provide precise fault address */
2044 emulate_pf(ctxt, new_tss_base, err);
2045 return ret;
2046 }
2047 }
2048
2049 return load_state_from_tss32(ctxt, ops, &tss_seg);
2050 }
2051
2052 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2053 struct x86_emulate_ops *ops,
2054 u16 tss_selector, int reason,
2055 bool has_error_code, u32 error_code)
2056 {
2057 struct desc_struct curr_tss_desc, next_tss_desc;
2058 int ret;
2059 u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
2060 ulong old_tss_base =
2061 ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
2062 u32 desc_limit;
2063
2064 /* FIXME: old_tss_base == ~0 ? */
2065
2066 ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
2067 if (ret != X86EMUL_CONTINUE)
2068 return ret;
2069 ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
2070 if (ret != X86EMUL_CONTINUE)
2071 return ret;
2072
2073 /* FIXME: check that next_tss_desc is tss */
2074
2075 if (reason != TASK_SWITCH_IRET) {
2076 if ((tss_selector & 3) > next_tss_desc.dpl ||
2077 ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
2078 emulate_gp(ctxt, 0);
2079 return X86EMUL_PROPAGATE_FAULT;
2080 }
2081 }
2082
2083 desc_limit = desc_limit_scaled(&next_tss_desc);
2084 if (!next_tss_desc.p ||
2085 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2086 desc_limit < 0x2b)) {
2087 emulate_ts(ctxt, tss_selector & 0xfffc);
2088 return X86EMUL_PROPAGATE_FAULT;
2089 }
2090
2091 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2092 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2093 write_segment_descriptor(ctxt, ops, old_tss_sel,
2094 &curr_tss_desc);
2095 }
2096
2097 if (reason == TASK_SWITCH_IRET)
2098 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2099
2100 	/* Set the back link to the previous task only if the NT bit is set
2101 	   in EFLAGS; note that old_tss_sel is not used after this point. */
2102 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2103 old_tss_sel = 0xffff;
2104
2105 if (next_tss_desc.type & 8)
2106 ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
2107 old_tss_base, &next_tss_desc);
2108 else
2109 ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
2110 old_tss_base, &next_tss_desc);
2111 if (ret != X86EMUL_CONTINUE)
2112 return ret;
2113
2114 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2115 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2116
2117 if (reason != TASK_SWITCH_IRET) {
2118 next_tss_desc.type |= (1 << 1); /* set busy flag */
2119 write_segment_descriptor(ctxt, ops, tss_selector,
2120 &next_tss_desc);
2121 }
2122
2123 ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
2124 ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
2125 ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
2126
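	/*
	 * If the task switch was caused by an event that delivers an error
	 * code, push that error code onto the new task's stack using the
	 * operand size implied by the new TSS type (4 bytes for a 32-bit
	 * TSS, 2 bytes for a 16-bit one).
	 */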
2127 if (has_error_code) {
2128 struct decode_cache *c = &ctxt->decode;
2129
2130 c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2131 c->lock_prefix = 0;
2132 c->src.val = (unsigned long) error_code;
2133 emulate_push(ctxt, ops);
2134 }
2135
2136 return ret;
2137 }
2138
2139 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2140 u16 tss_selector, int reason,
2141 bool has_error_code, u32 error_code)
2142 {
2143 struct x86_emulate_ops *ops = ctxt->ops;
2144 struct decode_cache *c = &ctxt->decode;
2145 int rc;
2146
2147 c->eip = ctxt->eip;
2148 c->dst.type = OP_NONE;
2149
2150 rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
2151 has_error_code, error_code);
2152
2153 if (rc == X86EMUL_CONTINUE) {
2154 rc = writeback(ctxt, ops);
2155 if (rc == X86EMUL_CONTINUE)
2156 ctxt->eip = c->eip;
2157 }
2158
2159 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
2160 }
2161
2162 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
2163 int reg, struct operand *op)
2164 {
2165 struct decode_cache *c = &ctxt->decode;
2166 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2167
2168 register_address_increment(c, &c->regs[reg], df * op->bytes);
2169 op->addr.mem = register_address(c, base, c->regs[reg]);
2170 }
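/*
 * For example, a movsb with EFLAGS.DF clear advances both RSI and RDI by
 * one byte per iteration; with DF set they are decremented instead, which
 * is what the df multiplier above implements.
 */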
2171
2172 static int em_push(struct x86_emulate_ctxt *ctxt)
2173 {
2174 emulate_push(ctxt, ctxt->ops);
2175 return X86EMUL_CONTINUE;
2176 }
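/*
 * em_push() merely adapts emulate_push() to the execute-callback
 * signature expected by the I() opcode-table entries below.
 */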
2177
2178 #define D(_y) { .flags = (_y) }
2179 #define N D(0)
2180 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
2181 #define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
2182 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
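/*
 * For example, I(SrcImm | Mov | Stack, em_push) expands to
 * { .flags = (SrcImm | Mov | Stack), .u.execute = (em_push) }:
 * a table entry that decodes an immediate source operand and
 * dispatches to em_push() for execution.
 */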
2183
2184 static struct opcode group1[] = {
2185 X7(D(Lock)), N
2186 };
2187
2188 static struct opcode group1A[] = {
2189 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
2190 };
2191
2192 static struct opcode group3[] = {
2193 D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
2194 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2195 X4(D(SrcMem | ModRM)),
2196 };
2197
2198 static struct opcode group4[] = {
2199 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
2200 N, N, N, N, N, N,
2201 };
2202
2203 static struct opcode group5[] = {
2204 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2205 D(SrcMem | ModRM | Stack), N,
2206 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
2207 D(SrcMem | ModRM | Stack), N,
2208 };
2209
2210 static struct group_dual group7 = { {
2211 N, N, D(ModRM | SrcMem | Priv), D(ModRM | SrcMem | Priv),
2212 D(SrcNone | ModRM | DstMem | Mov), N,
2213 D(SrcMem16 | ModRM | Mov | Priv),
2214 D(SrcMem | ModRM | ByteOp | Priv | NoAccess),
2215 }, {
2216 D(SrcNone | ModRM | Priv), N, N, D(SrcNone | ModRM | Priv),
2217 D(SrcNone | ModRM | DstMem | Mov), N,
2218 D(SrcMem16 | ModRM | Mov | Priv), N,
2219 } };
2220
2221 static struct opcode group8[] = {
2222 N, N, N, N,
2223 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
2224 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
2225 };
2226
2227 static struct group_dual group9 = { {
2228 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
2229 }, {
2230 N, N, N, N, N, N, N, N,
2231 } };
2232
2233 static struct opcode opcode_table[256] = {
2234 /* 0x00 - 0x07 */
2235 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2236 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2237 D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
2238 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2239 /* 0x08 - 0x0F */
2240 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2241 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2242 D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
2243 D(ImplicitOps | Stack | No64), N,
2244 /* 0x10 - 0x17 */
2245 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2246 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2247 D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
2248 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2249 /* 0x18 - 0x1F */
2250 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2251 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2252 D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
2253 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2254 /* 0x20 - 0x27 */
2255 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2256 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2257 D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm), N, N,
2258 /* 0x28 - 0x2F */
2259 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2260 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2261 D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm), N, N,
2262 /* 0x30 - 0x37 */
2263 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2264 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2265 D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm), N, N,
2266 /* 0x38 - 0x3F */
2267 D(ByteOp | DstMem | SrcReg | ModRM), D(DstMem | SrcReg | ModRM),
2268 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2269 D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
2270 N, N,
2271 /* 0x40 - 0x4F */
2272 X16(D(DstReg)),
2273 /* 0x50 - 0x57 */
2274 X8(I(SrcReg | Stack, em_push)),
2275 /* 0x58 - 0x5F */
2276 X8(D(DstReg | Stack)),
2277 /* 0x60 - 0x67 */
2278 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2279 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
2280 N, N, N, N,
2281 /* 0x68 - 0x6F */
2282 I(SrcImm | Mov | Stack, em_push), N,
2283 I(SrcImmByte | Mov | Stack, em_push), N,
2284 D(DstDI | ByteOp | Mov | String), D(DstDI | Mov | String), /* insb, insw/insd */
2285 D(SrcSI | ByteOp | ImplicitOps | String), D(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */
2286 /* 0x70 - 0x7F */
2287 X16(D(SrcImmByte)),
2288 /* 0x80 - 0x87 */
2289 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
2290 G(DstMem | SrcImm | ModRM | Group, group1),
2291 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
2292 G(DstMem | SrcImmByte | ModRM | Group, group1),
2293 D(ByteOp | DstMem | SrcReg | ModRM), D(DstMem | SrcReg | ModRM),
2294 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2295 /* 0x88 - 0x8F */
2296 D(ByteOp | DstMem | SrcReg | ModRM | Mov), D(DstMem | SrcReg | ModRM | Mov),
2297 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem | ModRM | Mov),
2298 D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
2299 D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
2300 /* 0x90 - 0x97 */
2301 X8(D(SrcAcc | DstReg)),
2302 /* 0x98 - 0x9F */
2303 N, N, D(SrcImmFAddr | No64), N,
2304 D(ImplicitOps | Stack), D(ImplicitOps | Stack), N, N,
2305 /* 0xA0 - 0xA7 */
2306 D(ByteOp | DstAcc | SrcMem | Mov | MemAbs), D(DstAcc | SrcMem | Mov | MemAbs),
2307 D(ByteOp | DstMem | SrcAcc | Mov | MemAbs), D(DstMem | SrcAcc | Mov | MemAbs),
2308 D(ByteOp | SrcSI | DstDI | Mov | String), D(SrcSI | DstDI | Mov | String),
2309 D(ByteOp | SrcSI | DstDI | String), D(SrcSI | DstDI | String),
2310 /* 0xA8 - 0xAF */
2311 D(DstAcc | SrcImmByte | ByteOp), D(DstAcc | SrcImm),
2312 D(ByteOp | SrcAcc | DstDI | Mov | String), D(SrcAcc | DstDI | Mov | String),
2313 D(ByteOp | SrcSI | DstAcc | Mov | String), D(SrcSI | DstAcc | Mov | String),
2314 D(ByteOp | DstDI | String), D(DstDI | String),
2315 /* 0xB0 - 0xB7 */
2316 X8(D(ByteOp | DstReg | SrcImm | Mov)),
2317 /* 0xB8 - 0xBF */
2318 X8(D(DstReg | SrcImm | Mov)),
2319 /* 0xC0 - 0xC7 */
2320 D(ByteOp | DstMem | SrcImm | ModRM), D(DstMem | SrcImmByte | ModRM),
2321 N, D(ImplicitOps | Stack), N, N,
2322 D(ByteOp | DstMem | SrcImm | ModRM | Mov), D(DstMem | SrcImm | ModRM | Mov),
2323 /* 0xC8 - 0xCF */
2324 N, N, N, D(ImplicitOps | Stack),
2325 D(ImplicitOps), D(SrcImmByte), D(ImplicitOps | No64), D(ImplicitOps),
2326 /* 0xD0 - 0xD7 */
2327 D(ByteOp | DstMem | SrcOne | ModRM), D(DstMem | SrcOne | ModRM),
2328 D(ByteOp | DstMem | SrcImplicit | ModRM), D(DstMem | SrcImplicit | ModRM),
2329 N, N, N, N,
2330 /* 0xD8 - 0xDF */
2331 N, N, N, N, N, N, N, N,
2332 /* 0xE0 - 0xE7 */
2333 N, N, N, N,
2334 D(ByteOp | SrcImmUByte | DstAcc), D(SrcImmUByte | DstAcc),
2335 D(ByteOp | SrcAcc | DstImmUByte), D(SrcAcc | DstImmUByte),
2336 /* 0xE8 - 0xEF */
2337 D(SrcImm | Stack), D(SrcImm | ImplicitOps),
2338 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
2339 D(SrcNone | ByteOp | DstAcc), D(SrcNone | DstAcc),
2340 D(ByteOp | SrcAcc | ImplicitOps), D(SrcAcc | ImplicitOps),
2341 /* 0xF0 - 0xF7 */
2342 N, N, N, N,
2343 D(ImplicitOps | Priv), D(ImplicitOps), G(ByteOp, group3), G(0, group3),
2344 /* 0xF8 - 0xFF */
2345 D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
2346 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
2347 };
2348
2349 static struct opcode twobyte_table[256] = {
2350 /* 0x00 - 0x0F */
2351 N, GD(0, &group7), N, N,
2352 N, D(ImplicitOps), D(ImplicitOps | Priv), N,
2353 D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
2354 N, D(ImplicitOps | ModRM), N, N,
2355 /* 0x10 - 0x1F */
2356 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
2357 /* 0x20 - 0x2F */
2358 D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264),
2359 D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264),
2360 N, N, N, N,
2361 N, N, N, N, N, N, N, N,
2362 /* 0x30 - 0x3F */
2363 D(ImplicitOps | Priv), N, D(ImplicitOps | Priv), N,
2364 D(ImplicitOps), D(ImplicitOps | Priv), N, N,
2365 N, N, N, N, N, N, N, N,
2366 /* 0x40 - 0x4F */
2367 X16(D(DstReg | SrcMem | ModRM | Mov)),
2368 /* 0x50 - 0x5F */
2369 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2370 /* 0x60 - 0x6F */
2371 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2372 /* 0x70 - 0x7F */
2373 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2374 /* 0x80 - 0x8F */
2375 X16(D(SrcImm)),
2376 /* 0x90 - 0x9F */
2377 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
2378 /* 0xA0 - 0xA7 */
2379 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2380 N, D(DstMem | SrcReg | ModRM | BitOp),
2381 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2382 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
2383 /* 0xA8 - 0xAF */
2384 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2385 N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
2386 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2387 D(DstMem | SrcReg | Src2CL | ModRM),
2388 D(ModRM), N,
2389 /* 0xB0 - 0xB7 */
2390 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2391 N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
2392 N, N, D(ByteOp | DstReg | SrcMem | ModRM | Mov),
2393 D(DstReg | SrcMem16 | ModRM | Mov),
2394 /* 0xB8 - 0xBF */
2395 N, N,
2396 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
2397 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2398 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
2399 /* 0xC0 - 0xCF */
2400 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2401 N, D(DstMem | SrcReg | ModRM | Mov),
2402 N, N, N, GD(0, &group9),
2403 N, N, N, N, N, N, N, N,
2404 /* 0xD0 - 0xDF */
2405 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2406 /* 0xE0 - 0xEF */
2407 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2408 /* 0xF0 - 0xFF */
2409 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
2410 };
2411
2412 #undef D
2413 #undef N
2414 #undef G
2415 #undef GD
2416 #undef I
2417
2418 int
2419 x86_decode_insn(struct x86_emulate_ctxt *ctxt)
2420 {
2421 struct x86_emulate_ops *ops = ctxt->ops;
2422 struct decode_cache *c = &ctxt->decode;
2423 int rc = X86EMUL_CONTINUE;
2424 int mode = ctxt->mode;
2425 int def_op_bytes, def_ad_bytes, dual, goffset;
2426 struct opcode opcode, *g_mod012, *g_mod3;
2427 struct operand memop = { .type = OP_NONE };
2428
2429 	/* we cannot decode an insn before the previous rep insn completes */
2430 WARN_ON(ctxt->restart);
2431
2432 c->eip = ctxt->eip;
2433 c->fetch.start = c->fetch.end = c->eip;
2434 ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
2435
2436 switch (mode) {
2437 case X86EMUL_MODE_REAL:
2438 case X86EMUL_MODE_VM86:
2439 case X86EMUL_MODE_PROT16:
2440 def_op_bytes = def_ad_bytes = 2;
2441 break;
2442 case X86EMUL_MODE_PROT32:
2443 def_op_bytes = def_ad_bytes = 4;
2444 break;
2445 #ifdef CONFIG_X86_64
2446 case X86EMUL_MODE_PROT64:
2447 def_op_bytes = 4;
2448 def_ad_bytes = 8;
2449 break;
2450 #endif
2451 default:
2452 return -1;
2453 }
2454
2455 c->op_bytes = def_op_bytes;
2456 c->ad_bytes = def_ad_bytes;
2457
2458 /* Legacy prefixes. */
2459 for (;;) {
2460 switch (c->b = insn_fetch(u8, 1, c->eip)) {
2461 case 0x66: /* operand-size override */
2462 /* switch between 2/4 bytes */
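			/* (2 ^ 6 == 4 and 4 ^ 6 == 2, so XOR with 6 toggles the size) */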
2463 c->op_bytes = def_op_bytes ^ 6;
2464 break;
2465 case 0x67: /* address-size override */
2466 if (mode == X86EMUL_MODE_PROT64)
2467 /* switch between 4/8 bytes */
2468 c->ad_bytes = def_ad_bytes ^ 12;
2469 else
2470 /* switch between 2/4 bytes */
2471 c->ad_bytes = def_ad_bytes ^ 6;
2472 break;
2473 case 0x26: /* ES override */
2474 case 0x2e: /* CS override */
2475 case 0x36: /* SS override */
2476 case 0x3e: /* DS override */
2477 set_seg_override(c, (c->b >> 3) & 3);
2478 break;
2479 case 0x64: /* FS override */
2480 case 0x65: /* GS override */
2481 set_seg_override(c, c->b & 7);
2482 break;
2483 case 0x40 ... 0x4f: /* REX */
2484 if (mode != X86EMUL_MODE_PROT64)
2485 goto done_prefixes;
2486 c->rex_prefix = c->b;
2487 continue;
2488 case 0xf0: /* LOCK */
2489 c->lock_prefix = 1;
2490 break;
2491 case 0xf2: /* REPNE/REPNZ */
2492 c->rep_prefix = REPNE_PREFIX;
2493 break;
2494 case 0xf3: /* REP/REPE/REPZ */
2495 c->rep_prefix = REPE_PREFIX;
2496 break;
2497 default:
2498 goto done_prefixes;
2499 }
2500
2501 /* Any legacy prefix after a REX prefix nullifies its effect. */
2502
2503 c->rex_prefix = 0;
2504 }
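/*
 * For example, in 32-bit code the byte sequence 66 50 decodes as a push
 * of the 16-bit AX register: the 0x66 prefix flips op_bytes from 4 to 2
 * before the 0x50 (push rAX) opcode is fetched.
 */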
2505
2506 done_prefixes:
2507
2508 /* REX prefix. */
2509 if (c->rex_prefix & 8)
2510 c->op_bytes = 8; /* REX.W */
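	/*
	 * e.g. 48 89 c8 is mov %rcx,%rax: the REX.W prefix (0x48) widens
	 * the default 32-bit operation to 64 bits.
	 */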
2511
2512 /* Opcode byte(s). */
2513 opcode = opcode_table[c->b];
2514 /* Two-byte opcode? */
2515 if (c->b == 0x0f) {
2516 c->twobyte = 1;
2517 c->b = insn_fetch(u8, 1, c->eip);
2518 opcode = twobyte_table[c->b];
2519 }
2520 c->d = opcode.flags;
2521
2522 if (c->d & Group) {
2523 dual = c->d & GroupDual;
2524 c->modrm = insn_fetch(u8, 1, c->eip);
2525 --c->eip;
2526
2527 if (c->d & GroupDual) {
2528 g_mod012 = opcode.u.gdual->mod012;
2529 g_mod3 = opcode.u.gdual->mod3;
2530 } else
2531 g_mod012 = g_mod3 = opcode.u.group;
2532
2533 c->d &= ~(Group | GroupDual);
2534
2535 goffset = (c->modrm >> 3) & 7;
2536
2537 if ((c->modrm >> 6) == 3)
2538 opcode = g_mod3[goffset];
2539 else
2540 opcode = g_mod012[goffset];
2541 c->d |= opcode.flags;
2542 }
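	/*
	 * For example, opcode 0xff is decoded through group5: bits 5:3 of
	 * the ModRM byte pick the entry, so /6 (push r/m) yields the
	 * D(SrcMem | ModRM | Stack) flags regardless of the mod field.
	 */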
2543
2544 c->execute = opcode.u.execute;
2545
2546 /* Unrecognised? */
2547 if (c->d == 0 || (c->d & Undefined)) {
2548 DPRINTF("Cannot emulate %02x\n", c->b);
2549 return -1;
2550 }
2551
2552 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
2553 c->op_bytes = 8;
2554
2555 if (c->d & Op3264) {
2556 if (mode == X86EMUL_MODE_PROT64)
2557 c->op_bytes = 8;
2558 else
2559 c->op_bytes = 4;
2560 }
2561
2562 /* ModRM and SIB bytes. */
2563 if (c->d & ModRM) {
2564 rc = decode_modrm(ctxt, ops, &memop);
2565 if (!c->has_seg_override)
2566 set_seg_override(c, c->modrm_seg);
2567 } else if (c->d & MemAbs)
2568 rc = decode_abs(ctxt, ops, &memop);
2569 if (rc != X86EMUL_CONTINUE)
2570 goto done;
2571
2572 if (!c->has_seg_override)
2573 set_seg_override(c, VCPU_SREG_DS);
2574
2575 if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d))
2576 memop.addr.mem += seg_override_base(ctxt, ops, c);
2577
2578 if (memop.type == OP_MEM && c->ad_bytes != 8)
2579 memop.addr.mem = (u32)memop.addr.mem;
2580
2581 if (memop.type == OP_MEM && c->rip_relative)
2582 memop.addr.mem += c->eip;
2583
2584 /*
2585 * Decode and fetch the source operand: register, memory
2586 * or immediate.
2587 */
2588 switch (c->d & SrcMask) {
2589 case SrcNone:
2590 break;
2591 case SrcReg:
2592 decode_register_operand(&c->src, c, 0);
2593 break;
2594 case SrcMem16:
2595 memop.bytes = 2;
2596 goto srcmem_common;
2597 case SrcMem32:
2598 memop.bytes = 4;
2599 goto srcmem_common;
2600 case SrcMem:
2601 memop.bytes = (c->d & ByteOp) ? 1 :
2602 c->op_bytes;
2603 srcmem_common:
2604 c->src = memop;
2605 break;
2606 case SrcImm:
2607 case SrcImmU:
2608 c->src.type = OP_IMM;
2609 c->src.addr.mem = c->eip;
2610 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2611 if (c->src.bytes == 8)
2612 c->src.bytes = 4;
2613 /* NB. Immediates are sign-extended as necessary. */
2614 switch (c->src.bytes) {
2615 case 1:
2616 c->src.val = insn_fetch(s8, 1, c->eip);
2617 break;
2618 case 2:
2619 c->src.val = insn_fetch(s16, 2, c->eip);
2620 break;
2621 case 4:
2622 c->src.val = insn_fetch(s32, 4, c->eip);
2623 break;
2624 }
2625 if ((c->d & SrcMask) == SrcImmU) {
2626 switch (c->src.bytes) {
2627 case 1:
2628 c->src.val &= 0xff;
2629 break;
2630 case 2:
2631 c->src.val &= 0xffff;
2632 break;
2633 case 4:
2634 c->src.val &= 0xffffffff;
2635 break;
2636 }
2637 }
2638 break;
2639 case SrcImmByte:
2640 case SrcImmUByte:
2641 c->src.type = OP_IMM;
2642 c->src.addr.mem = c->eip;
2643 c->src.bytes = 1;
2644 if ((c->d & SrcMask) == SrcImmByte)
2645 c->src.val = insn_fetch(s8, 1, c->eip);
2646 else
2647 c->src.val = insn_fetch(u8, 1, c->eip);
2648 break;
2649 case SrcAcc:
2650 c->src.type = OP_REG;
2651 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2652 c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
2653 fetch_register_operand(&c->src);
2654 break;
2655 case SrcOne:
2656 c->src.bytes = 1;
2657 c->src.val = 1;
2658 break;
2659 case SrcSI:
2660 c->src.type = OP_MEM;
2661 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2662 c->src.addr.mem =
2663 register_address(c, seg_override_base(ctxt, ops, c),
2664 c->regs[VCPU_REGS_RSI]);
2665 c->src.val = 0;
2666 break;
2667 case SrcImmFAddr:
2668 c->src.type = OP_IMM;
2669 c->src.addr.mem = c->eip;
2670 c->src.bytes = c->op_bytes + 2;
2671 insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
2672 break;
2673 case SrcMemFAddr:
2674 memop.bytes = c->op_bytes + 2;
2675 goto srcmem_common;
2677 }
2678
2679 /*
2680 * Decode and fetch the second source operand: register, memory
2681 * or immediate.
2682 */
2683 switch (c->d & Src2Mask) {
2684 case Src2None:
2685 break;
2686 case Src2CL:
2687 c->src2.bytes = 1;
2688 		/* the shift count is CL, the low byte of RCX */
2688 		c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff;
2689 break;
2690 case Src2ImmByte:
2691 c->src2.type = OP_IMM;
2692 c->src2.addr.mem = c->eip;
2693 c->src2.bytes = 1;
2694 c->src2.val = insn_fetch(u8, 1, c->eip);
2695 break;
2696 case Src2One:
2697 c->src2.bytes = 1;
2698 c->src2.val = 1;
2699 break;
2700 }
2701
2702 /* Decode and fetch the destination operand: register or memory. */
2703 switch (c->d & DstMask) {
2704 case DstReg:
2705 decode_register_operand(&c->dst, c,
2706 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
2707 break;
2708 case DstImmUByte:
2709 c->dst.type = OP_IMM;
2710 c->dst.addr.mem = c->eip;
2711 c->dst.bytes = 1;
2712 c->dst.val = insn_fetch(u8, 1, c->eip);
2713 break;
2714 case DstMem:
2715 case DstMem64:
2716 c->dst = memop;
2717 if ((c->d & DstMask) == DstMem64)
2718 c->dst.bytes = 8;
2719 else
2720 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2721 if (c->d & BitOp)
2722 fetch_bit_operand(c);
2723 c->dst.orig_val = c->dst.val;
2724 break;
2725 case DstAcc:
2726 c->dst.type = OP_REG;
2727 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2728 c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
2729 fetch_register_operand(&c->dst);
2730 c->dst.orig_val = c->dst.val;
2731 break;
2732 case DstDI:
2733 c->dst.type = OP_MEM;
2734 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2735 c->dst.addr.mem =
2736 register_address(c, es_base(ctxt, ops),
2737 c->regs[VCPU_REGS_RDI]);
2738 c->dst.val = 0;
2739 break;
2740 case ImplicitOps:
2741 /* Special instructions do their own operand decoding. */
2742 default:
2743 c->dst.type = OP_NONE; /* Disable writeback. */
2744 return 0;
2745 }
2746
2747 done:
2748 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
2749 }
2750
2751 int
2752 x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
2753 {
2754 struct x86_emulate_ops *ops = ctxt->ops;
2755 u64 msr_data;
2756 struct decode_cache *c = &ctxt->decode;
2757 int rc = X86EMUL_CONTINUE;
2758 int saved_dst_type = c->dst.type;
2759 int irq; /* Used for int 3, int, and into */
2760
2761 ctxt->decode.mem_read.pos = 0;
2762
2763 if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
2764 emulate_ud(ctxt);
2765 goto done;
2766 }
2767
2768 /* LOCK prefix is allowed only with some instructions */
2769 if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
2770 emulate_ud(ctxt);
2771 goto done;
2772 }
2773
2774 	/* Privileged instructions can be executed only at CPL 0 */
2775 if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
2776 emulate_gp(ctxt, 0);
2777 goto done;
2778 }
2779
2780 if (c->rep_prefix && (c->d & String)) {
2781 ctxt->restart = true;
2782 /* All REP prefixes have the same first termination condition */
2783 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
2784 string_done:
2785 ctxt->restart = false;
2786 ctxt->eip = c->eip;
2787 goto done;
2788 }
2789 		/* The second termination condition applies only to REPE
2790 		 * and REPNE. Test whether the repeat string operation prefix
2791 		 * is REPE/REPZ or REPNE/REPNZ and, if so, check the
2792 		 * corresponding termination condition:
2793 		 * - if REPE/REPZ and ZF = 0 then done
2794 		 * - if REPNE/REPNZ and ZF = 1 then done
2795 		 */
2796 if ((c->b == 0xa6) || (c->b == 0xa7) ||
2797 (c->b == 0xae) || (c->b == 0xaf)) {
2798 if ((c->rep_prefix == REPE_PREFIX) &&
2799 ((ctxt->eflags & EFLG_ZF) == 0))
2800 goto string_done;
2801 if ((c->rep_prefix == REPNE_PREFIX) &&
2802 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
2803 goto string_done;
2804 }
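		/*
		 * e.g. a repe cmpsb loop terminates either when RCX counts
		 * down to zero or when the first mismatching byte clears ZF.
		 */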
2805 c->eip = ctxt->eip;
2806 }
2807
2808 if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
2809 rc = read_emulated(ctxt, ops, c->src.addr.mem,
2810 c->src.valptr, c->src.bytes);
2811 if (rc != X86EMUL_CONTINUE)
2812 goto done;
2813 c->src.orig_val64 = c->src.val64;
2814 }
2815
2816 if (c->src2.type == OP_MEM) {
2817 rc = read_emulated(ctxt, ops, c->src2.addr.mem,
2818 &c->src2.val, c->src2.bytes);
2819 if (rc != X86EMUL_CONTINUE)
2820 goto done;
2821 }
2822
2823 if ((c->d & DstMask) == ImplicitOps)
2824 goto special_insn;
2825 
2827 if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
2828 /* optimisation - avoid slow emulated read if Mov */
2829 rc = read_emulated(ctxt, ops, c->dst.addr.mem,
2830 &c->dst.val, c->dst.bytes);
2831 if (rc != X86EMUL_CONTINUE)
2832 goto done;
2833 }
2834 c->dst.orig_val = c->dst.val;
2835
2836 special_insn:
2837
2838 if (c->execute) {
2839 rc = c->execute(ctxt);
2840 if (rc != X86EMUL_CONTINUE)
2841 goto done;
2842 goto writeback;
2843 }
2844
2845 if (c->twobyte)
2846 goto twobyte_insn;
2847
2848 switch (c->b) {
2849 case 0x00 ... 0x05:
2850 add: /* add */
2851 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
2852 break;
2853 case 0x06: /* push es */
2854 emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
2855 break;
2856 case 0x07: /* pop es */
2857 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
2858 if (rc != X86EMUL_CONTINUE)
2859 goto done;
2860 break;
2861 case 0x08 ... 0x0d:
2862 or: /* or */
2863 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
2864 break;
2865 case 0x0e: /* push cs */
2866 emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
2867 break;
2868 case 0x10 ... 0x15:
2869 adc: /* adc */
2870 emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
2871 break;
2872 case 0x16: /* push ss */
2873 emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
2874 break;
2875 case 0x17: /* pop ss */
2876 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
2877 if (rc != X86EMUL_CONTINUE)
2878 goto done;
2879 break;
2880 case 0x18 ... 0x1d:
2881 sbb: /* sbb */
2882 emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
2883 break;
2884 case 0x1e: /* push ds */
2885 emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
2886 break;
2887 case 0x1f: /* pop ds */
2888 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
2889 if (rc != X86EMUL_CONTINUE)
2890 goto done;
2891 break;
2892 case 0x20 ... 0x25:
2893 and: /* and */
2894 emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
2895 break;
2896 case 0x28 ... 0x2d:
2897 sub: /* sub */
2898 emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
2899 break;
2900 case 0x30 ... 0x35:
2901 xor: /* xor */
2902 emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
2903 break;
2904 case 0x38 ... 0x3d:
2905 cmp: /* cmp */
2906 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
2907 break;
2908 case 0x40 ... 0x47: /* inc r16/r32 */
2909 emulate_1op("inc", c->dst, ctxt->eflags);
2910 break;
2911 case 0x48 ... 0x4f: /* dec r16/r32 */
2912 emulate_1op("dec", c->dst, ctxt->eflags);
2913 break;
2914 case 0x58 ... 0x5f: /* pop reg */
2915 pop_instruction:
2916 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
2917 if (rc != X86EMUL_CONTINUE)
2918 goto done;
2919 break;
2920 case 0x60: /* pusha */
2921 rc = emulate_pusha(ctxt, ops);
2922 if (rc != X86EMUL_CONTINUE)
2923 goto done;
2924 break;
2925 case 0x61: /* popa */
2926 rc = emulate_popa(ctxt, ops);
2927 if (rc != X86EMUL_CONTINUE)
2928 goto done;
2929 break;
2930 case 0x63: /* movsxd */
2931 if (ctxt->mode != X86EMUL_MODE_PROT64)
2932 goto cannot_emulate;
2933 c->dst.val = (s32) c->src.val;
2934 break;
2935 case 0x6c: /* insb */
2936 case 0x6d: /* insw/insd */
2937 c->src.val = c->regs[VCPU_REGS_RDX];
2938 goto do_io_in;
2939 case 0x6e: /* outsb */
2940 case 0x6f: /* outsw/outsd */
2941 c->dst.val = c->regs[VCPU_REGS_RDX];
2942 goto do_io_out;
2944 case 0x70 ... 0x7f: /* jcc (short) */
2945 if (test_cc(c->b, ctxt->eflags))
2946 jmp_rel(c, c->src.val);
2947 break;
2948 case 0x80 ... 0x83: /* Grp1 */
2949 switch (c->modrm_reg) {
2950 case 0:
2951 goto add;
2952 case 1:
2953 goto or;
2954 case 2:
2955 goto adc;
2956 case 3:
2957 goto sbb;
2958 case 4:
2959 goto and;
2960 case 5:
2961 goto sub;
2962 case 6:
2963 goto xor;
2964 case 7:
2965 goto cmp;
2966 }
2967 break;
2968 case 0x84 ... 0x85:
2969 test:
2970 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
2971 break;
2972 case 0x86 ... 0x87: /* xchg */
2973 xchg:
2974 /* Write back the register source. */
2975 c->src.val = c->dst.val;
2976 write_register_operand(&c->src);
2977 /*
2978 * Write back the memory destination with implicit LOCK
2979 * prefix.
2980 */
2981 c->dst.val = c->src.orig_val;
2982 c->lock_prefix = 1;
2983 break;
2984 case 0x88 ... 0x8b: /* mov */
2985 goto mov;
2986 case 0x8c: /* mov r/m, sreg */
2987 if (c->modrm_reg > VCPU_SREG_GS) {
2988 emulate_ud(ctxt);
2989 goto done;
2990 }
2991 c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
2992 break;
2993 case 0x8d: /* lea r16/r32, m */
2994 c->dst.val = c->src.addr.mem;
2995 break;
2996 case 0x8e: { /* mov seg, r/m16 */
2997 uint16_t sel;
2998
2999 sel = c->src.val;
3000
3001 if (c->modrm_reg == VCPU_SREG_CS ||
3002 c->modrm_reg > VCPU_SREG_GS) {
3003 emulate_ud(ctxt);
3004 goto done;
3005 }
3006
3007 if (c->modrm_reg == VCPU_SREG_SS)
3008 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3009
3010 rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
3011
3012 c->dst.type = OP_NONE; /* Disable writeback. */
3013 break;
3014 }
3015 case 0x8f: /* pop (sole member of Grp1a) */
3016 rc = emulate_grp1a(ctxt, ops);
3017 if (rc != X86EMUL_CONTINUE)
3018 goto done;
3019 break;
3020 case 0x90 ... 0x97: /* nop / xchg reg, rax */
3021 if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
3022 break;
3023 goto xchg;
3024 case 0x9c: /* pushf */
3025 c->src.val = (unsigned long) ctxt->eflags;
3026 emulate_push(ctxt, ops);
3027 break;
3028 case 0x9d: /* popf */
3029 c->dst.type = OP_REG;
3030 c->dst.addr.reg = &ctxt->eflags;
3031 c->dst.bytes = c->op_bytes;
3032 rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
3033 if (rc != X86EMUL_CONTINUE)
3034 goto done;
3035 break;
3036 case 0xa0 ... 0xa3: /* mov */
3037 case 0xa4 ... 0xa5: /* movs */
3038 goto mov;
3039 case 0xa6 ... 0xa7: /* cmps */
3040 c->dst.type = OP_NONE; /* Disable writeback. */
3041 		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", (void *)c->src.addr.mem, (void *)c->dst.addr.mem);
3042 goto cmp;
3043 case 0xa8 ... 0xa9: /* test ax, imm */
3044 goto test;
3045 case 0xaa ... 0xab: /* stos */
3046 case 0xac ... 0xad: /* lods */
3047 goto mov;
3048 case 0xae ... 0xaf: /* scas */
3049 DPRINTF("Urk! I don't handle SCAS.\n");
3050 goto cannot_emulate;
3051 case 0xb0 ... 0xbf: /* mov r, imm */
3052 goto mov;
3053 case 0xc0 ... 0xc1:
3054 emulate_grp2(ctxt);
3055 break;
3056 case 0xc3: /* ret */
3057 c->dst.type = OP_REG;
3058 c->dst.addr.reg = &c->eip;
3059 c->dst.bytes = c->op_bytes;
3060 goto pop_instruction;
3061 case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
3062 mov:
3063 c->dst.val = c->src.val;
3064 break;
3065 case 0xcb: /* ret far */
3066 rc = emulate_ret_far(ctxt, ops);
3067 if (rc != X86EMUL_CONTINUE)
3068 goto done;
3069 break;
3070 case 0xcc: /* int3 */
3071 irq = 3;
3072 goto do_interrupt;
3073 case 0xcd: /* int n */
3074 irq = c->src.val;
3075 do_interrupt:
3076 rc = emulate_int(ctxt, ops, irq);
3077 if (rc != X86EMUL_CONTINUE)
3078 goto done;
3079 break;
3080 case 0xce: /* into */
3081 if (ctxt->eflags & EFLG_OF) {
3082 irq = 4;
3083 goto do_interrupt;
3084 }
3085 break;
3086 case 0xcf: /* iret */
3087 rc = emulate_iret(ctxt, ops);
3089 if (rc != X86EMUL_CONTINUE)
3090 goto done;
3091 break;
3092 case 0xd0 ... 0xd1: /* Grp2 */
3093 emulate_grp2(ctxt);
3094 break;
3095 case 0xd2 ... 0xd3: /* Grp2 */
3096 c->src.val = c->regs[VCPU_REGS_RCX];
3097 emulate_grp2(ctxt);
3098 break;
3099 case 0xe4: /* inb */
3100 case 0xe5: /* in */
3101 goto do_io_in;
3102 case 0xe6: /* outb */
3103 case 0xe7: /* out */
3104 goto do_io_out;
3105 case 0xe8: /* call (near) */ {
3106 long int rel = c->src.val;
3107 c->src.val = (unsigned long) c->eip;
3108 jmp_rel(c, rel);
3109 emulate_push(ctxt, ops);
3110 break;
3111 }
3112 case 0xe9: /* jmp rel */
3113 goto jmp;
3114 case 0xea: { /* jmp far */
3115 unsigned short sel;
3116 jump_far:
3117 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
3118
3119 if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
3120 goto done;
3121
3122 c->eip = 0;
3123 memcpy(&c->eip, c->src.valptr, c->op_bytes);
3124 break;
3125 }
3126 case 0xeb:
3127 jmp: /* jmp rel short */
3128 jmp_rel(c, c->src.val);
3129 c->dst.type = OP_NONE; /* Disable writeback. */
3130 break;
3131 case 0xec: /* in al,dx */
3132 case 0xed: /* in (e/r)ax,dx */
3133 c->src.val = c->regs[VCPU_REGS_RDX];
3134 do_io_in:
3135 c->dst.bytes = min(c->dst.bytes, 4u);
3136 if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
3137 emulate_gp(ctxt, 0);
3138 goto done;
3139 }
3140 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
3141 &c->dst.val))
3142 goto done; /* IO is needed */
3143 break;
3144 case 0xee: /* out dx,al */
3145 case 0xef: /* out dx,(e/r)ax */
3146 c->dst.val = c->regs[VCPU_REGS_RDX];
3147 do_io_out:
3148 c->src.bytes = min(c->src.bytes, 4u);
3149 if (!emulator_io_permited(ctxt, ops, c->dst.val,
3150 c->src.bytes)) {
3151 emulate_gp(ctxt, 0);
3152 goto done;
3153 }
3154 ops->pio_out_emulated(c->src.bytes, c->dst.val,
3155 &c->src.val, 1, ctxt->vcpu);
3156 c->dst.type = OP_NONE; /* Disable writeback. */
3157 break;
3158 case 0xf4: /* hlt */
3159 ctxt->vcpu->arch.halt_request = 1;
3160 break;
3161 case 0xf5: /* cmc */
3162 /* complement carry flag from eflags reg */
3163 ctxt->eflags ^= EFLG_CF;
3164 break;
3165 case 0xf6 ... 0xf7: /* Grp3 */
3166 if (emulate_grp3(ctxt, ops) != X86EMUL_CONTINUE)
3167 goto cannot_emulate;
3168 break;
3169 case 0xf8: /* clc */
3170 ctxt->eflags &= ~EFLG_CF;
3171 break;
3172 case 0xf9: /* stc */
3173 ctxt->eflags |= EFLG_CF;
3174 break;
3175 case 0xfa: /* cli */
3176 if (emulator_bad_iopl(ctxt, ops)) {
3177 emulate_gp(ctxt, 0);
3178 goto done;
3179 } else
3180 ctxt->eflags &= ~X86_EFLAGS_IF;
3181 break;
3182 case 0xfb: /* sti */
3183 if (emulator_bad_iopl(ctxt, ops)) {
3184 emulate_gp(ctxt, 0);
3185 goto done;
3186 } else {
3187 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3188 ctxt->eflags |= X86_EFLAGS_IF;
3189 }
3190 break;
3191 case 0xfc: /* cld */
3192 ctxt->eflags &= ~EFLG_DF;
3193 break;
3194 case 0xfd: /* std */
3195 ctxt->eflags |= EFLG_DF;
3196 break;
3197 case 0xfe: /* Grp4 */
3198 grp45:
3199 rc = emulate_grp45(ctxt, ops);
3200 if (rc != X86EMUL_CONTINUE)
3201 goto done;
3202 break;
3203 case 0xff: /* Grp5 */
3204 if (c->modrm_reg == 5)
3205 goto jump_far;
3206 goto grp45;
3207 default:
3208 goto cannot_emulate;
3209 }
3210
3211 writeback:
3212 rc = writeback(ctxt, ops);
3213 if (rc != X86EMUL_CONTINUE)
3214 goto done;
3215
3216 	/*
3217 	 * Restore dst type in case the decode will be reused
3218 	 * (this happens for string instructions).
3219 	 */
3220 c->dst.type = saved_dst_type;
3221
3222 if ((c->d & SrcMask) == SrcSI)
3223 string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
3224 VCPU_REGS_RSI, &c->src);
3225
3226 if ((c->d & DstMask) == DstDI)
3227 string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
3228 &c->dst);
3229
3230 if (c->rep_prefix && (c->d & String)) {
3231 struct read_cache *rc = &ctxt->decode.io_read;
3232 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3233 		/*
3234 		 * Re-enter the guest when the pio read-ahead buffer is empty
3235 		 * or, if it is not used, after every 1024 iterations.
3236 		 */
3237 if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
3238 (rc->end != 0 && rc->end == rc->pos))
3239 ctxt->restart = false;
3240 }
3241 	/*
3242 	 * Reset the read cache here in case a string instruction is
3243 	 * restarted without being re-decoded.
3244 	 */
3245 ctxt->decode.mem_read.end = 0;
3246 ctxt->eip = c->eip;
3247
3248 done:
3249 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
3250
3251 twobyte_insn:
3252 switch (c->b) {
3253 case 0x01: /* lgdt, lidt, lmsw */
3254 switch (c->modrm_reg) {
3255 u16 size;
3256 unsigned long address;
3257
3258 case 0: /* vmcall */
3259 if (c->modrm_mod != 3 || c->modrm_rm != 1)
3260 goto cannot_emulate;
3261
3262 rc = kvm_fix_hypercall(ctxt->vcpu);
3263 if (rc != X86EMUL_CONTINUE)
3264 goto done;
3265
3266 /* Let the processor re-execute the fixed hypercall */
3267 c->eip = ctxt->eip;
3268 /* Disable writeback. */
3269 c->dst.type = OP_NONE;
3270 break;
3271 case 2: /* lgdt */
3272 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3273 &size, &address, c->op_bytes);
3274 if (rc != X86EMUL_CONTINUE)
3275 goto done;
3276 realmode_lgdt(ctxt->vcpu, size, address);
3277 /* Disable writeback. */
3278 c->dst.type = OP_NONE;
3279 break;
3280 case 3: /* lidt/vmmcall */
3281 if (c->modrm_mod == 3) {
3282 switch (c->modrm_rm) {
3283 case 1:
3284 rc = kvm_fix_hypercall(ctxt->vcpu);
3285 if (rc != X86EMUL_CONTINUE)
3286 goto done;
3287 break;
3288 default:
3289 goto cannot_emulate;
3290 }
3291 } else {
3292 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3293 &size, &address,
3294 c->op_bytes);
3295 if (rc != X86EMUL_CONTINUE)
3296 goto done;
3297 realmode_lidt(ctxt->vcpu, size, address);
3298 }
3299 /* Disable writeback. */
3300 c->dst.type = OP_NONE;
3301 break;
3302 case 4: /* smsw */
3303 c->dst.bytes = 2;
3304 c->dst.val = ops->get_cr(0, ctxt->vcpu);
3305 break;
3306 case 6: /* lmsw */
3307 ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
3308 (c->src.val & 0x0f), ctxt->vcpu);
3309 c->dst.type = OP_NONE;
3310 break;
3311 case 5: /* not defined */
3312 emulate_ud(ctxt);
3313 goto done;
3314 case 7: /* invlpg*/
3315 emulate_invlpg(ctxt->vcpu, c->src.addr.mem);
3316 /* Disable writeback. */
3317 c->dst.type = OP_NONE;
3318 break;
3319 default:
3320 goto cannot_emulate;
3321 }
3322 break;
3323 case 0x05: /* syscall */
3324 rc = emulate_syscall(ctxt, ops);
3325 if (rc != X86EMUL_CONTINUE)
3326 goto done;
3327 else
3328 goto writeback;
3329 break;
3330 case 0x06:
3331 emulate_clts(ctxt->vcpu);
3332 break;
3333 case 0x09: /* wbinvd */
3334 kvm_emulate_wbinvd(ctxt->vcpu);
3335 break;
3336 case 0x08: /* invd */
3337 case 0x0d: /* GrpP (prefetch) */
3338 case 0x18: /* Grp16 (prefetch/nop) */
3339 break;
3340 case 0x20: /* mov cr, reg */
3341 switch (c->modrm_reg) {
3342 case 1:
3343 case 5 ... 7:
3344 case 9 ... 15:
3345 emulate_ud(ctxt);
3346 goto done;
3347 }
3348 c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
3349 break;
3350 case 0x21: /* mov from dr to reg */
3351 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3352 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3353 emulate_ud(ctxt);
3354 goto done;
3355 }
3356 ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
3357 break;
3358 case 0x22: /* mov reg, cr */
3359 if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
3360 emulate_gp(ctxt, 0);
3361 goto done;
3362 }
3363 c->dst.type = OP_NONE;
3364 break;
3365 case 0x23: /* mov from reg to dr */
3366 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3367 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3368 emulate_ud(ctxt);
3369 goto done;
3370 }
3371
3372 if (ops->set_dr(c->modrm_reg, c->src.val &
3373 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
3374 ~0ULL : ~0U), ctxt->vcpu) < 0) {
3375 /* #UD condition is already handled by the code above */
3376 emulate_gp(ctxt, 0);
3377 goto done;
3378 }
3379
3380 c->dst.type = OP_NONE; /* no writeback */
3381 break;
3382 case 0x30:
3383 /* wrmsr */
3384 msr_data = (u32)c->regs[VCPU_REGS_RAX]
3385 | ((u64)c->regs[VCPU_REGS_RDX] << 32);
3386 if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
3387 emulate_gp(ctxt, 0);
3388 goto done;
3389 }
3390 rc = X86EMUL_CONTINUE;
3391 break;
3392 case 0x32:
3393 /* rdmsr */
3394 if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
3395 emulate_gp(ctxt, 0);
3396 goto done;
3397 } else {
3398 c->regs[VCPU_REGS_RAX] = (u32)msr_data;
3399 c->regs[VCPU_REGS_RDX] = msr_data >> 32;
3400 }
3401 rc = X86EMUL_CONTINUE;
3402 break;
3403 case 0x34: /* sysenter */
3404 rc = emulate_sysenter(ctxt, ops);
3405 if (rc != X86EMUL_CONTINUE)
3406 goto done;
3407 else
3408 goto writeback;
3409 break;
3410 case 0x35: /* sysexit */
3411 rc = emulate_sysexit(ctxt, ops);
3412 if (rc != X86EMUL_CONTINUE)
3413 goto done;
3414 else
3415 goto writeback;
3416 break;
3417 case 0x40 ... 0x4f: /* cmov */
3418 c->dst.val = c->dst.orig_val = c->src.val;
3419 if (!test_cc(c->b, ctxt->eflags))
3420 c->dst.type = OP_NONE; /* no writeback */
3421 break;
3422 case 0x80 ... 0x8f: /* jnz rel, etc*/
3423 if (test_cc(c->b, ctxt->eflags))
3424 jmp_rel(c, c->src.val);
3425 break;
3426 case 0x90 ... 0x9f: /* setcc r/m8 */
3427 c->dst.val = test_cc(c->b, ctxt->eflags);
3428 break;
3429 case 0xa0: /* push fs */
3430 emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
3431 break;
3432 case 0xa1: /* pop fs */
3433 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
3434 if (rc != X86EMUL_CONTINUE)
3435 goto done;
3436 break;
3437 case 0xa3:
3438 bt: /* bt */
3439 c->dst.type = OP_NONE;
3440 /* only subword offset */
3441 c->src.val &= (c->dst.bytes << 3) - 1;
3442 emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
3443 break;
3444 case 0xa4: /* shld imm8, r, r/m */
3445 case 0xa5: /* shld cl, r, r/m */
3446 emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
3447 break;
3448 case 0xa8: /* push gs */
3449 emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
3450 break;
3451 case 0xa9: /* pop gs */
3452 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
3453 if (rc != X86EMUL_CONTINUE)
3454 goto done;
3455 break;
3456 case 0xab:
3457 bts: /* bts */
3458 emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
3459 break;
3460 case 0xac: /* shrd imm8, r, r/m */
3461 case 0xad: /* shrd cl, r, r/m */
3462 emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
3463 break;
3464 case 0xae: /* clflush */
3465 break;
3466 case 0xb0 ... 0xb1: /* cmpxchg */
3467 /*
3468 * Save real source value, then compare EAX against
3469 * destination.
3470 */
3471 c->src.orig_val = c->src.val;
3472 c->src.val = c->regs[VCPU_REGS_RAX];
3473 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3474 if (ctxt->eflags & EFLG_ZF) {
3475 /* Success: write back to memory. */
3476 c->dst.val = c->src.orig_val;
3477 } else {
3478 /* Failure: write the value we saw to EAX. */
3479 c->dst.type = OP_REG;
3480 c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
3481 }
3482 break;
3483 case 0xb3:
3484 btr: /* btr */
3485 emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
3486 break;
3487 case 0xb6 ... 0xb7: /* movzx */
3488 c->dst.bytes = c->op_bytes;
3489 c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
3490 : (u16) c->src.val;
3491 break;
3492 case 0xba: /* Grp8 */
3493 switch (c->modrm_reg & 3) {
3494 case 0:
3495 goto bt;
3496 case 1:
3497 goto bts;
3498 case 2:
3499 goto btr;
3500 case 3:
3501 goto btc;
3502 }
3503 break;
3504 case 0xbb:
3505 btc: /* btc */
3506 emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
3507 break;
3508 case 0xbc: { /* bsf */
3509 u8 zf;
3510 __asm__ ("bsf %2, %0; setz %1"
3511 : "=r"(c->dst.val), "=q"(zf)
3512 : "r"(c->src.val));
3513 ctxt->eflags &= ~X86_EFLAGS_ZF;
3514 if (zf) {
3515 ctxt->eflags |= X86_EFLAGS_ZF;
3516 c->dst.type = OP_NONE; /* Disable writeback. */
3517 }
3518 break;
3519 }
3520 case 0xbd: { /* bsr */
3521 u8 zf;
3522 __asm__ ("bsr %2, %0; setz %1"
3523 : "=r"(c->dst.val), "=q"(zf)
3524 : "r"(c->src.val));
3525 ctxt->eflags &= ~X86_EFLAGS_ZF;
3526 if (zf) {
3527 ctxt->eflags |= X86_EFLAGS_ZF;
3528 c->dst.type = OP_NONE; /* Disable writeback. */
3529 }
3530 break;
3531 }
3532 case 0xbe ... 0xbf: /* movsx */
3533 c->dst.bytes = c->op_bytes;
3534 c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
3535 (s16) c->src.val;
3536 break;
3537 case 0xc0 ... 0xc1: /* xadd */
3538 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
3539 /* Write back the register source. */
3540 c->src.val = c->dst.orig_val;
3541 write_register_operand(&c->src);
3542 break;
3543 case 0xc3: /* movnti */
3544 c->dst.bytes = c->op_bytes;
3545 c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
3546 (u64) c->src.val;
3547 break;
3548 case 0xc7: /* Grp9 (cmpxchg8b) */
3549 rc = emulate_grp9(ctxt, ops);
3550 if (rc != X86EMUL_CONTINUE)
3551 goto done;
3552 break;
3553 default:
3554 goto cannot_emulate;
3555 }
3556 goto writeback;
3557
3558 cannot_emulate:
3559 DPRINTF("Cannot emulate %02x\n", c->b);
3560 return -1;
3561 }