/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 * Copyright (c) 2005 Keir Fraser
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
#include <public/xen.h>
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#include <linux/module.h>
#include <asm/kvm_emulate.h>
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<1)	/* 64bit memory operand */
#define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
#define SrcAcc      (0xd<<4)	/* Source Accumulator */
#define SrcImmU16   (0xe<<4)	/* Immediate operand, unsigned, 16 bits */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
/* Destination is only written; never read. */
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Imm     (4<<29)
#define Src2Mask    (7<<29)
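/*
 * Illustrative note (not from the original source): the decode flags above
 * pack several independent fields into one word, which a decoder reads back
 * by masking. For example, for an entry declared as DstMem | SrcReg | ModRM:
 *
 *	u32 d = DstMem | SrcReg | ModRM;
 *	switch (d & DstMask) { case DstMem: ...; }	// destination field
 *	switch (d & SrcMask) { case SrcReg: ...; }	// source field
 *	if (d & ModRM)					// a ModRM byte follows
 *		...;
 */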
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
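/*
 * Example (illustration only): the X-macros above replicate a table entry,
 * so a row of sixteen identical opcodes can be written as
 *
 *	X16(D(DstReg)),
 *
 * which expands to D(DstReg), D(DstReg), ... sixteen times.
 */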
	int (*execute)(struct x86_emulate_ctxt *ctxt);
	struct opcode *group;
	struct group_dual *gdual;
	struct opcode mod012[8];
	struct opcode mod3[8];
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */	\
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"orl %"_LO32 _tmp",("_STK"); "					\
/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp)					\
	/* _sav |= EFLAGS & _msk; */					\
	"andl %"_msk",%"_LO32 _tmp"; "					\
	"orl %"_LO32 _tmp",%"_sav"; "
#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
	__asm__ __volatile__ (						\
		_PRE_EFLAGS("0", "4", "2")				\
		_op _suffix " %"_x"3,%1; "				\
		_POST_EFLAGS("0", "4", "2")				\
		: "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),	\
		: _y ((_src).val), "i" (EFLAGS_MASK));			\
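/*
 * A minimal sketch of the trick used above (illustration only, not part of
 * the emulator): execute the real instruction on host copies of the
 * operands, then capture the resulting flags with pushf/pop. The macros
 * above additionally restore the guest's EFLAGS first via _PRE_EFLAGS;
 * this sketch only captures the result.
 */
#if 0
static u8 demo_emulate_add8(u8 dst, u8 src, unsigned long *eflags)
{
	unsigned long flags;

	__asm__ __volatile__ (
		"addb %2, %0\n\t"	/* run the real ADD on host registers */
		"pushf\n\t"		/* save the resulting EFLAGS */
		"pop %1"
		: "+q" (dst), "=r" (flags)
		: "q" (src));
	/* fold the arithmetic flags back into the guest's saved EFLAGS */
	*eflags = (*eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	return dst;
}
#endif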
/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
		unsigned long _tmp;					\
		switch ((_dst).bytes) {					\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16); \
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32); \
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
		unsigned long _tmp;					\
		switch ((_dst).bytes) {					\
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	\
					     _wx, _wy, _lx, _ly, _qx, _qy); \
/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")
/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")
/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)		\
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")
/* Instruction has three operands and one operand is stored in the ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type)	\
		unsigned long _tmp;					\
		_type _clv  = (_cl).val;				\
		_type _srcv = (_src).val;				\
		_type _dstv = (_dst).val;				\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
		(_cl).val  = (unsigned long) _clv;			\
		(_src).val = (unsigned long) _srcv;			\
		(_dst).val = (unsigned long) _dstv;			\
#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
		switch ((_dst).bytes) {					\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "w", unsigned short);		\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "l", unsigned int);		\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					      "q", unsigned long));	\
#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
		unsigned long _tmp;					\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			: "i" (EFLAGS_MASK));				\
/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)					\
		switch ((_dst).bytes) {					\
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix)	\
		unsigned long _tmp;					\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "1")			\
			_op _suffix " %5; "				\
			_POST_EFLAGS("0", "4", "1")			\
			: "=m" (_eflags), "=&r" (_tmp),			\
			  "+a" (_rax), "+d" (_rdx)			\
			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
			  "a" (_rax), "d" (_rdx));			\
/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags)		\
		switch ((_src).bytes) {					\
		case 1: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "b"); break; \
		case 2: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "w"); break; \
		case 4: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "l"); break; \
		case 8: ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "q")); break; \
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
#define insn_fetch_arr(_arr, _size, _eip)				\
({	rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
static inline unsigned long ad_mask(struct decode_cache *c)
	return (1UL << (c->ad_bytes << 3)) - 1;
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
	if (c->ad_bytes == sizeof(unsigned long))
	return reg & ad_mask(c);
static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
	return base + address_mask(c, reg);
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
	if (c->ad_bytes == sizeof(unsigned long))
	*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
static inline void jmp_rel(struct decode_cache *c, int rel)
	register_address_increment(c, &c->eip, rel);
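/*
 * Example (illustration only): with a 16-bit address size (ad_bytes == 2),
 * ad_mask() is 0xffff, so register_address_increment() wraps only the low
 * word; incrementing SI == 0xffff by one yields 0x0000 while leaving bits
 * 16 and up of the full register untouched.
 */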
static void set_seg_override(struct decode_cache *c, int seg)
	c->has_seg_override = true;
	c->seg_override = seg;
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
	return ops->get_cached_segment_base(seg, ctxt->vcpu);
static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
				       struct x86_emulate_ops *ops,
				       struct decode_cache *c)
	if (!c->has_seg_override)
	return seg_base(ctxt, ops, c->seg_override);
static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
	return seg_base(ctxt, ops, VCPU_SREG_ES);
static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
	return seg_base(ctxt, ops, VCPU_SREG_SS);
static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			      u32 error, bool valid)
	ctxt->exception = vec;
	ctxt->error_code = error;
	ctxt->error_code_valid = valid;
	ctxt->restart = false;
static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
	emulate_exception(ctxt, GP_VECTOR, err, true);
static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
	emulate_exception(ctxt, PF_VECTOR, err, true);
static void emulate_ud(struct x86_emulate_ctxt *ctxt)
	emulate_exception(ctxt, UD_VECTOR, 0, false);
static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
	emulate_exception(ctxt, TS_VECTOR, err, true);
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long eip, u8 *dest)
	struct fetch_cache *fc = &ctxt->decode.fetch;
	if (eip == fc->end) {
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
				size, ctxt->vcpu, NULL);
		if (rc != X86EMUL_CONTINUE)
	*dest = fc->data[eip - fc->start];
	return X86EMUL_CONTINUE;
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
		rc = do_insn_fetch_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
	return X86EMUL_CONTINUE;
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
static void *decode_register(u8 modrm_reg, unsigned long *regs,
	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
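/*
 * Example (illustration only): with highbyte_regs set, reg encodings 4-7
 * select AH/CH/DH/BH, i.e. byte 1 of RAX/RCX/RDX/RBX, so reg == 4 yields
 * a pointer one byte into regs[VCPU_REGS_RAX].
 */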
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   u16 *size, unsigned long *address, int op_bytes)
	rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL);
	if (rc != X86EMUL_CONTINUE)
	rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL);
static int test_cc(unsigned int condition, unsigned int flags)
	switch ((condition & 15) >> 1) {
		rc |= (flags & EFLG_OF);
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		rc |= (flags & EFLG_ZF);
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		rc |= (flags & EFLG_SF);
		rc |= (flags & EFLG_PF);
		rc |= (flags & EFLG_ZF);
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
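/*
 * Usage sketch (illustration only): a conditional branch uses the low
 * nibble of its opcode as the condition, e.g.
 *
 *	if (test_cc(c->b, ctxt->eflags))
 *		jmp_rel(c, c->src.val);
 */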
static void fetch_register_operand(struct operand *op)
		op->val = *(u8 *)op->addr.reg;
		op->val = *(u16 *)op->addr.reg;
		op->val = *(u32 *)op->addr.reg;
		op->val = *(u64 *)op->addr.reg;
static void decode_register_operand(struct operand *op,
				    struct decode_cache *c,
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
		op->addr.reg = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
	fetch_register_operand(op);
	op->orig_val = op->val;
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
	struct decode_cache *c = &ctxt->decode;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2;		/* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_seg = VCPU_SREG_DS;
	if (c->modrm_mod == 3) {
		op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		op->addr.reg = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		fetch_register_operand(op);
	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];
		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
			if (c->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, 2, c->eip);
			modrm_ea += insn_fetch(s8, 1, c->eip);
			modrm_ea += insn_fetch(u16, 2, c->eip);
		switch (c->modrm_rm) {
			if (c->modrm_mod != 0)
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			c->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, 4, c->eip);
				modrm_ea += c->regs[base_reg];
				modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
			modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
			if (c->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			modrm_ea += insn_fetch(s8, 1, c->eip);
			modrm_ea += insn_fetch(s32, 4, c->eip);
	op->addr.mem = modrm_ea;
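/*
 * Worked example (illustration only): for the 32-bit byte sequence
 * 8b 44 9e 08 (mov eax, [esi + ebx*4 + 8]), the ModRM byte 0x44 decodes to
 * mod = 01, reg = 000 (EAX), rm = 100, so a SIB byte follows; SIB 0x9e
 * gives scale = 2 (i.e. *4), index = 011 (EBX), base = 110 (ESI), and
 * mod = 01 appends an 8-bit displacement, here 0x08.
 */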
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops,
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	switch (c->ad_bytes) {
		op->addr.mem = insn_fetch(u16, 2, c->eip);
		op->addr.mem = insn_fetch(u32, 4, c->eip);
		op->addr.mem = insn_fetch(u64, 8, c->eip);
static void fetch_bit_operand(struct decode_cache *c)
	if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
		mask = ~(c->dst.bytes * 8 - 1);
		if (c->src.bytes == 2)
			sv = (s16)c->src.val & (s16)mask;
		else if (c->src.bytes == 4)
			sv = (s32)c->src.val & (s32)mask;
		c->dst.addr.mem += (sv >> 3);
	/* only subword offset */
	c->src.val &= (c->dst.bytes << 3) - 1;
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
	struct read_cache *mc = &ctxt->decode.mem_read;
		int n = min(size, 8u);
		if (mc->pos < mc->end)
		rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt, addr, err);
		if (rc != X86EMUL_CONTINUE)
		memcpy(dest, mc->data + mc->pos, n);
	return X86EMUL_CONTINUE;
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
	struct read_cache *rc = &ctxt->decode.io_read;
	if (rc->pos == rc->end) { /* refill pio read ahead */
		struct decode_cache *c = &ctxt->decode;
		unsigned int in_page, n;
		unsigned int count = c->rep_prefix ?
			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(c->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
		rc->pos = rc->end = 0;
		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
	memcpy(dest, rc->data + rc->pos, size);
static u32 desc_limit_scaled(struct desc_struct *desc)
	u32 limit = get_desc_limit(desc);
	return desc->g ? (limit << 12) | 0xfff : limit;
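/*
 * Example (illustration only): with the granularity bit set (desc->g == 1)
 * the 20-bit limit counts 4K pages, so limit == 0xfffff scales to
 * 0xffffffff (4GB - 1); with g == 0 the limit is a byte count, at most
 * 0xfffff.
 */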
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
	if (selector & 1 << 2) {
		struct desc_struct desc;
		memset(dt, 0, sizeof *dt);
		if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
		ops->get_gdt(dt, ctxt->vcpu);
/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
	u16 index = selector >> 3;
	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
	if (dt.size < index * 8 + 7) {
		emulate_gp(ctxt, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt, addr, err);
/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
	u16 index = selector >> 3;
	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
	if (dt.size < index * 8 + 7) {
		emulate_gp(ctxt, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt, addr, err);
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
	struct desc_struct seg_desc;
	unsigned err_vec = GP_VECTOR;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	memset(&seg_desc, 0, sizeof seg_desc);
	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
	if (null_selector) /* for NULL selector skip all following checks */
	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
	err_code = selector & 0xfffc;
	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
	err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
	cpl = ops->cpl(ctxt->vcpu);
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or segment descriptor's DPL != CPL
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
		if (!(seg_desc.type & 8))
		if (seg_desc.type & 4) {
			if (rpl > cpl || dpl != cpl)
		selector = (selector & 0xfffc) | cpl;
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
		if (seg_desc.s || seg_desc.type != 2)
	default: /* DS, ES, FS, or GS */
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
		/* mark segment as accessed */
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
	ops->set_segment_selector(selector, seg, ctxt->vcpu);
	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
	return X86EMUL_CONTINUE;
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
static void write_register_operand(struct operand *op)
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
		*(u8 *)op->addr.reg = (u8)op->val;
		*(u16 *)op->addr.reg = (u16)op->val;
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
		*op->addr.reg = op->val;
static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
	struct decode_cache *c = &ctxt->decode;
	switch (c->dst.type) {
		write_register_operand(&c->dst);
			rc = ops->cmpxchg_emulated(
			rc = ops->write_emulated(
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt, c->dst.addr.mem, err);
		if (rc != X86EMUL_CONTINUE)
	return X86EMUL_CONTINUE;
static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
	struct decode_cache *c = &ctxt->decode;
	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.addr.mem = register_address(c, ss_base(ctxt, ops),
					   c->regs[VCPU_REGS_RSP]);
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
	struct decode_cache *c = &ctxt->decode;
	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
						       c->regs[VCPU_REGS_RSP]),
	if (rc != X86EMUL_CONTINUE)
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			void *dest, int len)
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);
	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
			change_mask |= EFLG_IOPL;
			change_mask |= EFLG_IF;
	case X86EMUL_MODE_VM86:
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		change_mask |= EFLG_IF;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);
static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
	struct decode_cache *c = &ctxt->decode;
	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);
	emulate_push(ctxt, ops);
static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops, int seg)
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops)
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;
	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);
		emulate_push(ctxt, ops);
		rc = writeback(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
	/* Disable writeback. */
	c->dst.type = OP_NONE;
static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
int emulate_int_real(struct x86_emulate_ctxt *ctxt,
		     struct x86_emulate_ops *ops, int irq)
	struct decode_cache *c = &ctxt->decode;
	/* TODO: Add limit checks */
	c->src.val = ctxt->eflags;
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
	c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
	c->src.val = c->eip;
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
	c->dst.type = OP_NONE;
	ops->get_idt(&dt, ctxt->vcpu);
	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;
	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &err);
	if (rc != X86EMUL_CONTINUE)
	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &err);
	if (rc != X86EMUL_CONTINUE)
	rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
static int emulate_int(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops, int irq)
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_int_real(ctxt, ops, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
	/* TODO: Add stack limit check */
	rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
	if (temp_eip & ~0xffff) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
	rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
	if (c->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (c->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt, ops);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
	struct decode_cache *c = &ctxt->decode;
	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
	struct decode_cache *c = &ctxt->decode;
	unsigned long *rax = &c->regs[VCPU_REGS_RAX];
	unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		c->dst.val = ~c->dst.val;
		emulate_1op("neg", c->dst, ctxt->eflags);
		emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
		emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
		emulate_1op_rax_rdx("div", c->src, *rax, *rdx, ctxt->eflags);
		emulate_1op_rax_rdx("idiv", c->src, *rax, *rdx, ctxt->eflags);
		return X86EMUL_UNHANDLEABLE;
	return X86EMUL_CONTINUE;
static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
		emulate_1op("inc", c->dst, ctxt->eflags);
		emulate_1op("dec", c->dst, ctxt->eflags);
	case 2: /* call near abs */ {
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt, ops);
	case 4: /* jmp abs */
		c->eip = c->src.val;
		emulate_push(ctxt, ops);
	return X86EMUL_CONTINUE;
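/*
 * Background note (illustration only): the group-9 handler below follows
 * cmpxchg8b's architectural semantics: compare EDX:EAX with the 64-bit
 * destination; if equal, set ZF and store ECX:EBX into it, otherwise
 * clear ZF and load the destination into EDX:EAX.
 */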
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
	struct decode_cache *c = &ctxt->decode;
	u64 old = c->dst.orig_val64;
	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
		c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
			(u32) c->regs[VCPU_REGS_RBX];
		ctxt->eflags |= EFLG_ZF;
	return X86EMUL_CONTINUE;
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
	struct decode_cache *c = &ctxt->decode;
	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops, int seg)
	struct decode_cache *c = &ctxt->decode;
	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
	rc = load_segment_descriptor(ctxt, ops, sel, seg);
	if (rc != X86EMUL_CONTINUE)
	c->dst.val = c->src.val;
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops, struct desc_struct *cs,
			struct desc_struct *ss)
	memset(cs, 0, sizeof(struct desc_struct));
	ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
	memset(ss, 0, sizeof(struct desc_struct));
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->dpl = 0;		/* will be adjusted later */
	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		return X86EMUL_PROPAGATE_FAULT;
	setup_syscalls_segments(ctxt, ops, &cs, &ss);
	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);
	if (is_long_mode(ctxt->vcpu)) {
	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
		ops->get_msr(ctxt->vcpu,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;
		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	return X86EMUL_CONTINUE;
emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	if (ctxt->mode == X86EMUL_MODE_PROT64) {
		return X86EMUL_PROPAGATE_FAULT;
	setup_syscalls_segments(ctxt, ops, &cs, &ss);
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
	    || is_long_mode(ctxt->vcpu)) {
	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;
	return X86EMUL_CONTINUE;
emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	setup_syscalls_segments(ctxt, ops, &cs, &ss);
	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
		usermode = X86EMUL_MODE_PROT32;
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		ss_sel = (u16)(msr_data + 24);
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		ss_sel = cs_sel + 8;
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;
	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
	c->eip = c->regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
	return X86EMUL_CONTINUE;
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
	if (ctxt->mode == X86EMUL_MODE_REAL)
	if (ctxt->mode == X86EMUL_MODE_VM86)
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
	struct desc_struct tr_seg;
	u8 perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
	if (desc_limit_scaled(&tr_seg) < 103)
	r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
	if (r != X86EMUL_CONTINUE)
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
	r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
			  &perm, 1, ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
	if ((perm >> bit_idx) & mask)
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
		if (emulator_bad_iopl(ctxt, ops))
			if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
	ctxt->perm_ok = true;
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_16 *tss)
	struct decode_cache *c = &ctxt->decode;
	tss->flag = ctxt->eflags;
	tss->ax = c->regs[VCPU_REGS_RAX];
	tss->cx = c->regs[VCPU_REGS_RCX];
	tss->dx = c->regs[VCPU_REGS_RDX];
	tss->bx = c->regs[VCPU_REGS_RBX];
	tss->sp = c->regs[VCPU_REGS_RSP];
	tss->bp = c->regs[VCPU_REGS_RBP];
	tss->si = c->regs[VCPU_REGS_RSI];
	tss->di = c->regs[VCPU_REGS_RDI];
	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
	struct decode_cache *c = &ctxt->decode;
	ctxt->eflags = tss->flag | 2;
	c->regs[VCPU_REGS_RAX] = tss->ax;
	c->regs[VCPU_REGS_RCX] = tss->cx;
	c->regs[VCPU_REGS_RDX] = tss->dx;
	c->regs[VCPU_REGS_RBX] = tss->bx;
	c->regs[VCPU_REGS_RSP] = tss->sp;
	c->regs[VCPU_REGS_RBP] = tss->bp;
	c->regs[VCPU_REGS_RSI] = tss->si;
	c->regs[VCPU_REGS_RDI] = tss->di;
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	 * Now load segment descriptors. If a fault happens at this stage,
	 * it is handled in the context of the new task.
	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
	return X86EMUL_CONTINUE;
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
	struct tss_segment_16 tss_seg;
	u32 err, new_tss_base = get_desc_base(new_desc);
	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
	save_state_to_tss16(ctxt, ops, &tss_seg);
	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;
		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
	return load_state_from_tss16(ctxt, ops, &tss_seg);
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
	struct decode_cache *c = &ctxt->decode;
	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];
	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
	struct decode_cache *c = &ctxt->decode;
	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
	 * Now load segment descriptors. If a fault happens at this stage,
	 * it is handled in the context of the new task.
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
	return X86EMUL_CONTINUE;
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
	struct tss_segment_32 tss_seg;
	u32 err, new_tss_base = get_desc_base(new_desc);
	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
	save_state_to_tss32(ctxt, ops, &tss_seg);
	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;
		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
	return load_state_from_tss32(ctxt, ops, &tss_seg);
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
	struct desc_struct curr_tss_desc, next_tss_desc;
	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
	ulong old_tss_base =
		ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
	/* FIXME: old_tss_base == ~0 ? */
	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
	/* FIXME: check that next_tss_desc is tss */
	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, ops, old_tss_sel,
	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
	/* set back link to prev task only if NT bit is set in eflags;
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;
	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, ops, tss_selector,
	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
	if (has_error_code) {
		struct decode_cache *c = &ctxt->decode;
		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		c->src.val = (unsigned long) error_code;
		emulate_push(ctxt, ops);
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	c->dst.type = OP_NONE;
	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
				     has_error_code, error_code);
	if (rc == X86EMUL_CONTINUE) {
		rc = writeback(ctxt, ops);
		if (rc == X86EMUL_CONTINUE)
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
			    int reg, struct operand *op)
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->addr.mem = register_address(c, base, c->regs[reg]);
static int em_push(struct x86_emulate_ctxt *ctxt)
	emulate_push(ctxt, ctxt->ops);
	return X86EMUL_CONTINUE;
static int em_das(struct x86_emulate_ctxt *ctxt)
	struct decode_cache *c = &ctxt->decode;
	bool af, cf, old_cf;
	cf = ctxt->eflags & X86_EFLAGS_CF;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		cf = old_cf | (al >= 250);
	if (old_al > 0x99 || old_cf) {
	/* Set PF, ZF, SF */
	c->src.type = OP_IMM;
	emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
		ctxt->eflags |= X86_EFLAGS_CF;
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
static int em_call_far(struct x86_emulate_ctxt *ctxt)
	struct decode_cache *c = &ctxt->decode;
	old_cs = ctxt->ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
	if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;
	memcpy(&c->eip, c->src.valptr, c->op_bytes);
	c->src.val = old_cs;
	emulate_push(ctxt, ctxt->ops);
	rc = writeback(ctxt, ctxt->ops);
	if (rc != X86EMUL_CONTINUE)
	c->src.val = old_eip;
	emulate_push(ctxt, ctxt->ops);
	rc = writeback(ctxt, ctxt->ops);
	if (rc != X86EMUL_CONTINUE)
	c->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
	struct decode_cache *c = &ctxt->decode;
	c->dst.type = OP_REG;
	c->dst.addr.reg = &c->eip;
	c->dst.bytes = c->op_bytes;
	rc = emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
	return X86EMUL_CONTINUE;
static int em_imul(struct x86_emulate_ctxt *ctxt)
	struct decode_cache *c = &ctxt->decode;
	emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
	struct decode_cache *c = &ctxt->decode;
	c->dst.val = c->src2.val;
	return em_imul(ctxt);
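/*
 * Note (illustration only): em_cwd() below computes the sign extension of
 * the source into RDX: shifting the sign bit down to bit 0, subtracting 1
 * and inverting yields all-ones for a negative value and all-zeroes
 * otherwise, i.e. the cwd/cdq/cqo semantics.
 */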
static int em_cwd(struct x86_emulate_ctxt *ctxt)
	struct decode_cache *c = &ctxt->decode;
	c->dst.type = OP_REG;
	c->dst.bytes = c->src.bytes;
	c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
	c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);
	return X86EMUL_CONTINUE;
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
	unsigned cpl = ctxt->ops->cpl(ctxt->vcpu);
	struct decode_cache *c = &ctxt->decode;
	if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD)) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
	c->regs[VCPU_REGS_RAX] = (u32)tsc;
	c->regs[VCPU_REGS_RDX] = tsc >> 32;
	return X86EMUL_CONTINUE;
#define D(_y) { .flags = (_y) }
#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
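/*
 * Example (illustration only): a table entry such as
 *
 *	I(SrcImm | Mov | Stack, em_push)
 *
 * declares both the decode flags and the execution callback, so the
 * corresponding opcode (push imm) is decoded generically and then run
 * via em_push().
 */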
2334 static struct opcode group1[] = {
2338 static struct opcode group1A[] = {
2339 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
2342 static struct opcode group3[] = {
2343 D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
2344 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2345 X4(D(SrcMem | ModRM)),
2348 static struct opcode group4[] = {
2349 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
2353 static struct opcode group5[] = {
2354 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2355 D(SrcMem | ModRM | Stack),
2356 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
2357 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
2358 D(SrcMem | ModRM | Stack), N,
static struct group_dual group7 = { {
	N, N, D(ModRM | SrcMem | Priv), D(ModRM | SrcMem | Priv),
	D(SrcNone | ModRM | DstMem | Mov), N,
	D(SrcMem16 | ModRM | Mov | Priv),
	D(SrcMem | ModRM | ByteOp | Priv | NoAccess),
}, {
	D(SrcNone | ModRM | Priv), N, N, D(SrcNone | ModRM | Priv),
	D(SrcNone | ModRM | DstMem | Mov), N,
	D(SrcMem16 | ModRM | Mov | Priv), N,
} };
static struct opcode group8[] = {
	N, N, N, N,
	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
};
static struct group_dual group9 = { {
	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };
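
/*
 * The one-byte opcode table, indexed by the first opcode byte; Group
 * entries defer to the group tables above.
 */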
static struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x08 - 0x0F */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	D(ImplicitOps | Stack | No64), N,
	/* 0x10 - 0x17 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x18 - 0x1F */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x20 - 0x27 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm), N, N,
	/* 0x28 - 0x2F */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm),
	N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm), N, N,
	/* 0x38 - 0x3F */
	D(ByteOp | DstMem | SrcReg | ModRM), D(DstMem | SrcReg | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(D(DstReg | Stack)),
	/* 0x60 - 0x67 */
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86-64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	D(DstDI | ByteOp | Mov | String), D(DstDI | Mov | String), /* insb, insw/insd */
	D(SrcSI | ByteOp | ImplicitOps | String), D(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
	G(DstMem | SrcImm | ModRM | Group, group1),
	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
	G(DstMem | SrcImmByte | ModRM | Group, group1),
	D(ByteOp | DstMem | SrcReg | ModRM), D(DstMem | SrcReg | ModRM),
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	/* 0x88 - 0x8F */
	D(ByteOp | DstMem | SrcReg | ModRM | Mov), D(DstMem | SrcReg | ModRM | Mov),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem | ModRM | Mov),
	D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
	D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
	/* 0x90 - 0x97 */
	X8(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	D(SrcImmFAddr | No64), N,
	D(ImplicitOps | Stack), D(ImplicitOps | Stack), N, N,
	/* 0xA0 - 0xA7 */
	D(ByteOp | DstAcc | SrcMem | Mov | MemAbs), D(DstAcc | SrcMem | Mov | MemAbs),
	D(ByteOp | DstMem | SrcAcc | Mov | MemAbs), D(DstMem | SrcAcc | Mov | MemAbs),
	D(ByteOp | SrcSI | DstDI | Mov | String), D(SrcSI | DstDI | Mov | String),
	D(ByteOp | SrcSI | DstDI | String), D(SrcSI | DstDI | String),
	/* 0xA8 - 0xAF */
	D(DstAcc | SrcImmByte | ByteOp), D(DstAcc | SrcImm),
	D(ByteOp | SrcAcc | DstDI | Mov | String), D(SrcAcc | DstDI | Mov | String),
	D(ByteOp | SrcSI | DstAcc | Mov | String), D(SrcSI | DstAcc | Mov | String),
	D(ByteOp | SrcAcc | DstDI | String), D(SrcAcc | DstDI | String),
	/* 0xB0 - 0xB7 */
	X8(D(ByteOp | DstReg | SrcImm | Mov)),
	/* 0xB8 - 0xBF */
	X8(D(DstReg | SrcImm | Mov)),
	/* 0xC0 - 0xC7 */
	D(ByteOp | DstMem | SrcImm | ModRM), D(DstMem | SrcImmByte | ModRM),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	D(ImplicitOps | Stack),
	D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
	D(ByteOp | DstMem | SrcImm | ModRM | Mov), D(DstMem | SrcImm | ModRM | Mov),
	/* 0xC8 - 0xCF */
	N, N, N, D(ImplicitOps | Stack),
	D(ImplicitOps), D(SrcImmByte), D(ImplicitOps | No64), D(ImplicitOps),
	/* 0xD0 - 0xD7 */
	D(ByteOp | DstMem | SrcOne | ModRM), D(DstMem | SrcOne | ModRM),
	D(ByteOp | DstMem | ModRM), D(DstMem | ModRM),
	N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	X4(D(SrcImmByte)),
	D(ByteOp | SrcImmUByte | DstAcc), D(SrcImmUByte | DstAcc),
	D(ByteOp | SrcAcc | DstImmUByte), D(SrcAcc | DstImmUByte),
	/* 0xE8 - 0xEF */
	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
	D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
	D(SrcNone | ByteOp | DstAcc), D(SrcNone | DstAcc),
	D(ByteOp | SrcAcc | ImplicitOps), D(SrcAcc | ImplicitOps),
	/* 0xF0 - 0xF7 */
	N, N, N, N,
	D(ImplicitOps | Priv), D(ImplicitOps), G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
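
/* The 0x0f-escaped two-byte opcode space. */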
static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	N, GD(0, &group7), N, N,
	N, D(ImplicitOps), D(ImplicitOps | Priv), N,
	D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
	D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264),
	D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264),
	N, N, N, N,
	N, N, N, N, N, N, N, N,
	/* 0x30 - 0x3F */
	D(ImplicitOps | Priv), I(ImplicitOps, em_rdtsc),
	D(ImplicitOps | Priv), N,
	D(ImplicitOps), D(ImplicitOps | Priv), N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x70 - 0x7F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	N, D(DstMem | SrcReg | ModRM | BitOp),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
	/* 0xA8 - 0xAF */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM),
	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xCF */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
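
/*
 * Immediate operand helpers: imm_size() returns the immediate width
 * implied by the decode flags; even with a 64-bit operand size an
 * immediate occupies at most four bytes.
 */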
static unsigned imm_size(struct decode_cache *c)
{
	unsigned size;

	size = (c->d & ByteOp) ? 1 : c->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	struct decode_cache *c = &ctxt->decode;
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem = c->eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, 1, c->eip);
		break;
	case 2:
		op->val = insn_fetch(s16, 2, c->eip);
		break;
	case 4:
		op->val = insn_fetch(s32, 4, c->eip);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}
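
/*
 * Top-level decoder: consume legacy and REX prefixes, look up the
 * opcode (and group) tables, decode ModRM/SIB, then fetch the source,
 * second-source and destination operands.
 */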
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, dual, goffset;
	struct opcode opcode, *g_mod012, *g_mod3;
	struct operand memop = { .type = OP_NONE };

	/* we cannot decode an insn before we complete the previous rep insn */
	WARN_ON(ctxt->restart);

	c->eip = ctxt->eip;
	c->fetch.start = c->fetch.end = c->eip;
	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;
	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
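			/*
			 * The XORs above toggle between the two legal widths:
			 * 2 ^ 6 == 4, 4 ^ 6 == 2, and 4 ^ 12 == 8, 8 ^ 12 == 4.
			 */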
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
			c->rep_prefix = REPNE_PREFIX;
			break;
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = REPE_PREFIX;
			break;
		default:
			goto done_prefixes;
		}
		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix & 8)
		c->op_bytes = 8;	/* REX.W */
	/* Opcode byte(s). */
	opcode = opcode_table[c->b];
	/* Two-byte opcode? */
	if (c->b == 0x0f) {
		c->twobyte = 1;
		c->b = insn_fetch(u8, 1, c->eip);
		opcode = twobyte_table[c->b];
	}
	c->d = opcode.flags;
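	/*
	 * Group opcodes: ModRM bits 5:3 select the real instruction within
	 * the group table; GroupDual uses separate tables for the register
	 * (mod == 3) and memory forms.
	 */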
	if (c->d & Group) {
		dual = c->d & GroupDual;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		if (c->d & GroupDual) {
			g_mod012 = opcode.u.gdual->mod012;
			g_mod3 = opcode.u.gdual->mod3;
		} else
			g_mod012 = g_mod3 = opcode.u.group;

		c->d &= ~(Group | GroupDual);

		goffset = (c->modrm >> 3) & 7;

		if ((c->modrm >> 6) == 3)
			opcode = g_mod3[goffset];
		else
			opcode = g_mod012[goffset];
		c->d |= opcode.flags;
	}
	c->execute = opcode.u.execute;

	/* Unrecognised? */
	if (c->d == 0 || (c->d & Undefined)) {
		DPRINTF("Cannot emulate %02x\n", c->b);
		return -1;
	}

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	if (c->d & Op3264) {
		if (mode == X86EMUL_MODE_PROT64)
			c->op_bytes = 8;
		else
			c->op_bytes = 4;
	}
	/* ModRM and SIB bytes. */
	if (c->d & ModRM) {
		rc = decode_modrm(ctxt, ops, &memop);
		if (!c->has_seg_override)
			set_seg_override(c, c->modrm_seg);
	} else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops, &memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);
	if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d))
		memop.addr.mem += seg_override_base(ctxt, ops, c);

	if (memop.type == OP_MEM && c->ad_bytes != 8)
		memop.addr.mem = (u32)memop.addr.mem;
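	/* RIP-relative operands are biased by the instruction pointer. */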
	if (memop.type == OP_MEM && c->rip_relative)
		memop.addr.mem += c->eip;
	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(&c->src, c, 0);
		break;
	case SrcMem16:
		memop.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		memop.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		memop.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
	srcmem_common:
		*(struct operand *)&c->src = memop;
		break;
	case SrcImmU16:
		rc = decode_imm(ctxt, &c->src, 2, false);
		break;
	case SrcImm:
		rc = decode_imm(ctxt, &c->src, imm_size(c), true);
		break;
	case SrcImmU:
		rc = decode_imm(ctxt, &c->src, imm_size(c), false);
		break;
	case SrcImmByte:
		rc = decode_imm(ctxt, &c->src, 1, true);
		break;
	case SrcImmUByte:
		rc = decode_imm(ctxt, &c->src, 1, false);
		break;
	case SrcAcc:
		c->src.type = OP_REG;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->src);
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	case SrcSI:
		c->src.type = OP_MEM;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.mem =
			register_address(c, seg_override_base(ctxt, ops, c),
					 c->regs[VCPU_REGS_RSI]);
		c->src.val = 0;
		break;
	case SrcImmFAddr:
		c->src.type = OP_IMM;
		c->src.addr.mem = c->eip;
		c->src.bytes = c->op_bytes + 2;
		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
		break;
	case SrcMemFAddr:
		memop.bytes = c->op_bytes + 2;
		goto srcmem_common;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;
	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff;
		break;
	case Src2ImmByte:
		rc = decode_imm(ctxt, &c->src2, 1, true);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	case Src2Imm:
		rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
		break;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;
	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case DstReg:
		decode_register_operand(&c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstImmUByte:
		c->dst.type = OP_IMM;
		c->dst.addr.mem = c->eip;
		c->dst.bytes = 1;
		c->dst.val = insn_fetch(u8, 1, c->eip);
		break;
	case DstMem:
	case DstMem64:
		c->dst = memop;
		if ((c->d & DstMask) == DstMem64)
			c->dst.bytes = 8;
		else
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->d & BitOp)
			fetch_bit_operand(c);
		c->dst.orig_val = c->dst.val;
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->dst);
		c->dst.orig_val = c->dst.val;
		break;
	case DstDI:
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.mem =
			register_address(c, es_base(ctxt, ops),
					 c->regs[VCPU_REGS_RDI]);
		c->dst.val = 0;
		break;
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	}
done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;
	int irq; /* Used for int 3, int, and into */

	ctxt->decode.mem_read.pos = 0;
	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		emulate_ud(ctxt);
		goto done;
	}

	/* Privileged instructions can be executed only at CPL 0 */
	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		goto done;
	}

	if (c->rep_prefix && (c->d & String)) {
		ctxt->restart = true;
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
			ctxt->restart = false;
			ctxt->eip = c->eip;
			goto done;
		}
	}
	if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
		rc = read_emulated(ctxt, ops, c->src.addr.mem,
				   c->src.valptr, c->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val64 = c->src.val64;
	}

	if (c->src2.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, c->src2.addr.mem,
				   &c->src2.val, c->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = read_emulated(ctxt, ops, c->dst.addr.mem,
				   &c->dst.val, c->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;
special_insn:

	if (c->execute) {
		rc = c->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (c->twobyte)
		goto twobyte_insn;

	switch (c->b) {
	case 0x00 ... 0x05:
	add:		/* add */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		break;
	case 0x06: /* push es */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0x07: /* pop es */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x08 ... 0x0d:
	or:		/* or */
		emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
		break;
	case 0x0e: /* push cs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
		break;
	case 0x10 ... 0x15:
	adc:		/* adc */
		emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
		break;
	case 0x16: /* push ss */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0x17: /* pop ss */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x18 ... 0x1d:
	sbb:		/* sbb */
		emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
		break;
	case 0x1e: /* push ds */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0x1f: /* pop ds */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x20 ... 0x25:
	and:		/* and */
		emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
		break;
	case 0x28 ... 0x2d:
	sub:		/* sub */
		emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
		break;
	case 0x30 ... 0x35:
	xor:		/* xor */
		emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
		break;
	case 0x38 ... 0x3d:
	cmp:		/* cmp */
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		break;
	case 0x40 ... 0x47: /* inc r16/r32 */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 0x48 ... 0x4f: /* dec r16/r32 */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 0x58 ... 0x5f: /* pop reg */
	pop_instruction:
		rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x60: /* pusha */
		rc = emulate_pusha(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x61: /* popa */
		rc = emulate_popa(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x63: /* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		c->dst.val = (s32) c->src.val;
		break;
	case 0x6c: /* insb */
	case 0x6d: /* insw/insd */
		c->src.val = c->regs[VCPU_REGS_RDX];
		goto do_io_in;
	case 0x6e: /* outsb */
	case 0x6f: /* outsw/outsd */
		c->dst.val = c->regs[VCPU_REGS_RDX];
		goto do_io_out;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x80 ... 0x83: /* Grp1 */
		switch (c->modrm_reg) {
		case 0:
			goto add;
		case 1:
			goto or;
		case 2:
			goto adc;
		case 3:
			goto sbb;
		case 4:
			goto and;
		case 5:
			goto sub;
		case 6:
			goto xor;
		case 7:
			goto cmp;
		}
		break;
	case 0x84 ... 0x85:
	test:
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 0x86 ... 0x87: /* xchg */
	xchg:
		/* Write back the register source. */
		c->src.val = c->dst.val;
		write_register_operand(&c->src);
		/*
		 * Write back the memory destination with implicit LOCK
		 * prefix.
		 */
		c->dst.val = c->src.orig_val;
		c->lock_prefix = 1;
		break;
	case 0x88 ... 0x8b: /* mov */
		goto mov;
	case 0x8c: /* mov r/m, sreg */
		if (c->modrm_reg > VCPU_SREG_GS) {
			emulate_ud(ctxt);
			goto done;
		}
		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x8d: /* lea r16/r32, m */
		c->dst.val = c->src.addr.mem;
		break;
	case 0x8e: { /* mov seg, r/m16 */
		uint16_t sel;

		sel = c->src.val;

		if (c->modrm_reg == VCPU_SREG_CS ||
		    c->modrm_reg > VCPU_SREG_GS) {
			emulate_ud(ctxt);
			goto done;
		}

		if (c->modrm_reg == VCPU_SREG_SS)
			ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);

		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	}
	case 0x8f: /* pop (sole member of Grp1a) */
		rc = emulate_grp1a(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
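		/* xchg %rax, %rax (opcode 0x90) is the architectural NOP. */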
		if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
			break;
		goto xchg;
	case 0x98: /* cbw/cwde/cdqe */
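		/* Sign-extend the low half of the accumulator: AL->AX, AX->EAX or EAX->RAX. */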
		switch (c->op_bytes) {
		case 2: c->dst.val = (s8)c->dst.val; break;
		case 4: c->dst.val = (s16)c->dst.val; break;
		case 8: c->dst.val = (s32)c->dst.val; break;
		}
		break;
	case 0x9c: /* pushf */
		c->src.val = (unsigned long) ctxt->eflags;
		emulate_push(ctxt, ops);
		break;
	case 0x9d: /* popf */
		c->dst.type = OP_REG;
		c->dst.addr.reg = &ctxt->eflags;
		c->dst.bytes = c->op_bytes;
		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa0 ... 0xa3: /* mov */
	case 0xa4 ... 0xa5: /* movs */
		goto mov;
	case 0xa6 ... 0xa7: /* cmps */
		c->dst.type = OP_NONE; /* Disable writeback. */
		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.addr.mem, c->dst.addr.mem);
		goto cmp;
	case 0xa8 ... 0xa9: /* test ax, imm */
		goto test;
	case 0xaa ... 0xab: /* stos */
	case 0xac ... 0xad: /* lods */
		goto mov;
	case 0xae ... 0xaf: /* scas */
		goto cmp;
	case 0xb0 ... 0xbf: /* mov r, imm */
		goto mov;
	case 0xc3: /* ret */
		c->dst.type = OP_REG;
		c->dst.addr.reg = &c->eip;
		c->dst.bytes = c->op_bytes;
		goto pop_instruction;
	case 0xc4: /* les */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xc5: /* lds */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
	mov:
		c->dst.val = c->src.val;
		break;
	case 0xcb: /* ret far */
		rc = emulate_ret_far(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xcc: /* int3 */
		irq = 3;
		goto do_interrupt;
	case 0xcd: /* int n */
		irq = c->src.val;
	do_interrupt:
		rc = emulate_int(ctxt, ops, irq);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xce: /* into */
		if (ctxt->eflags & EFLG_OF) {
			irq = 4;
			goto do_interrupt;
		}
		break;
	case 0xcf: /* iret */
		rc = emulate_iret(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xd0 ... 0xd1: /* Grp2 */
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3: /* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe0 ... 0xe2: /* loop/loopz/loopnz */
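		/*
		 * c->b ^ 0x5 maps loopnz (0xe0) and loopz (0xe1) onto condition
		 * codes 5 (ZF clear) and 4 (ZF set); plain loop (0xe2) tests
		 * only the count register.
		 */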
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
		    (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
			jmp_rel(c, c->src.val);
		break;
	case 0xe3: /* jcxz/jecxz/jrcxz */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
			jmp_rel(c, c->src.val);
		break;
	case 0xe4: /* inb */
	case 0xe5: /* in */
		goto do_io_in;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt, ops);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: { /* jmp far */
		unsigned short sel;
	jump_far:
		memcpy(&sel, c->src.valptr + c->op_bytes, 2);

		if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
			goto done;

		c->eip = 0;
		memcpy(&c->eip, c->src.valptr, c->op_bytes);
		break;
	}
	case 0xeb:
	jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out dx,al */
	case 0xef: /* out dx,(e/r)ax */
		c->dst.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		c->src.bytes = min(c->src.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->dst.val,
					  c->src.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		ops->pio_out_emulated(c->src.bytes, c->dst.val,
				      &c->src.val, 1, ctxt->vcpu);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf6 ... 0xf7: /* Grp3 */
		if (emulate_grp3(ctxt, ops) != X86EMUL_CONTINUE)
			goto cannot_emulate;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else
			ctxt->eflags &= ~X86_EFLAGS_IF;
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else {
			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
			ctxt->eflags |= X86_EFLAGS_IF;
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	default:
		goto cannot_emulate;
	}
writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * restore dst type in case the decoding is reused
	 * (happens for string instructions)
	 */
	c->dst.type = saved_dst_type;
	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
				VCPU_REGS_RSI, &c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
				&c->dst);
	if (c->rep_prefix && (c->d & String)) {
		struct read_cache *rc = &ctxt->decode.io_read;
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		/*
		 * The second termination condition applies only to REPE and
		 * REPNE: if the repeat-string prefix is REPE/REPZ or
		 * REPNE/REPNZ, test the corresponding condition:
		 * - REPE/REPZ: done if ZF = 0
		 * - REPNE/REPNZ: done if ZF = 1
		 */
		if (((c->b == 0xa6) || (c->b == 0xa7) ||
		     (c->b == 0xae) || (c->b == 0xaf))
		    && (((c->rep_prefix == REPE_PREFIX) &&
			 ((ctxt->eflags & EFLG_ZF) == 0))
			|| ((c->rep_prefix == REPNE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
			ctxt->restart = false;
		/*
		 * Re-enter the guest when the PIO read-ahead buffer is empty
		 * or, if it is not used, after every 1024 iterations.
		 */
		else if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
			 (rc->end != 0 && rc->end == rc->pos)) {
			ctxt->restart = false;
		}
	}

	/*
	 * reset the read cache here in case the string instruction is
	 * restarted without decoding again
	 */
	ctxt->decode.mem_read.end = 0;
	ctxt->eip = c->eip;

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
			u16 size;
			unsigned long address;
		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.addr.mem,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					if (rc != X86EMUL_CONTINUE)
						goto done;
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.addr.mem,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.bytes = 2;
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			emulate_ud(ctxt);
			goto done;
		case 7: /* invlpg */
			emulate_invlpg(ctxt->vcpu, c->src.addr.mem);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05: /* syscall */
		rc = emulate_syscall(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x06: /* clts */
		emulate_clts(ctxt->vcpu);
		break;
	case 0x09: /* wbinvd */
		kvm_emulate_wbinvd(ctxt->vcpu);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
		break;
	case 0x20: /* mov cr, reg */
		switch (c->modrm_reg) {
		case 1:
		case 5 ... 7:
		case 9 ... 15:
			emulate_ud(ctxt);
			goto done;
		}
		c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x21: /* mov from dr to reg */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			goto done;
		}
		ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
		break;
	case 0x22: /* mov reg, cr */
		if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			goto done;
		}

		if (ops->set_dr(c->modrm_reg, c->src.val &
				((ctxt->mode == X86EMUL_MODE_PROT64) ?
				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
			/* #UD condition is already handled by the code above */
			emulate_gp(ctxt, 0);
			goto done;
		}

		c->dst.type = OP_NONE; /* no writeback */
		break;
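	/* wrmsr: write EDX:EAX to the MSR selected by ECX. */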
	case 0x30: /* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		break;
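	/* rdmsr: read the MSR selected by ECX into EDX:EAX. */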
	case 0x32: /* rdmsr */
		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x34: /* sysenter */
		rc = emulate_sysenter(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x35: /* sysexit */
		rc = emulate_sysexit(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x40 ... 0x4f: /* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		c->dst.val = test_cc(c->b, ctxt->eflags);
		break;
	case 0xa0: /* push fs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa1: /* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa3:
	bt:		/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8: /* push gs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xa9: /* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xab:
	bts:		/* bts */
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae: /* clflush */
		break;
	case 0xb0 ... 0xb1: /* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
	case 0xb2: /* lss */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xb3:
	btr:		/* btr */
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb4: /* lfs */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xb5: /* lgs */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xb6 ... 0xb7: /* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
					     : (u16) c->src.val;
		break;
	case 0xba: /* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	btc:		/* btc */
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbc: { /* bsf */
		u8 zf;
		__asm__ ("bsf %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE; /* Disable writeback. */
		}
		break;
	}
	case 0xbd: { /* bsr */
		u8 zf;
		__asm__ ("bsr %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE; /* Disable writeback. */
		}
		break;
	}
	case 0xbe ... 0xbf: /* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val
					     : (s16) c->src.val;
		break;
	case 0xc0 ... 0xc1: /* xadd */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		/* Write back the register source. */
		c->src.val = c->dst.orig_val;
		write_register_operand(&c->src);
		break;
	case 0xc3: /* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val
						: (u64) c->src.val;
		break;
	case 0xc7: /* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	default:
		goto cannot_emulate;
	}
	goto writeback;

cannot_emulate:
	DPRINTF("Cannot emulate %02x\n", c->b);
	return -1;
}