/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "x86.h"
#include "tss.h"
/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
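
/*
 * Illustrative note (not from the original source): an opcode-table entry
 * ORs one Op* value into each operand field.  For example, a hypothetical
 * entry
 *
 *	DstMem | SrcReg | ModRM | Lock
 *
 * packs OpMem into the destination field, OpReg into the source field,
 * and sets the ModRM and Lock flag bits, so the decoder can recover each
 * operand type with (flags >> shift) & OpMask.
 */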
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22) /* Vendor specific instruction */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
/* Source 2 operand type */
#define Src2Shift   (29)
#define Src2None    (OpNone << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
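
/*
 * Illustrative note (not from the original source): the X* macros simply
 * repeat their argument, which keeps the opcode tables compact.  For
 * example, X4(N(inc)) expands to four identical table entries, and a run
 * of sixteen identical entries can be written as X16(...).
 */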
struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		struct opcode *group;
		struct group_dual *gdual;
		struct gprefix *gprefix;
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"push %"_tmp"; "						\
	"push %"_tmp"; "						\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"pushf; "							\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "						\
	"orl  %"_LO32 _tmp",("_STK"); "					\
	"popf; "							\
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_LO32 _tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "
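
/*
 * Illustrative note (not from the original source): for a 2-operand "add"
 * with a 32-bit destination, the emulation macros below emit roughly
 *
 *	_PRE_EFLAGS(...)  "addl %src,%dst; "  _POST_EFLAGS(...)
 *
 * i.e. the guest EFLAGS bits in EFLAGS_MASK are loaded into the host
 * flags register, the real instruction runs, and the resulting flags are
 * copied back into ctxt->eflags.
 */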
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype)		\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" ((ctxt)->eflags),			\
			  "+q" (*(_dsttype*)&(ctxt)->dst.val),		\
			  "=&r" (_tmp)					\
			: _y ((ctxt)->src.val), "i" (EFLAGS_MASK));	\
	} while (0)

/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy)		\
	do {								\
		unsigned long _tmp;					\
									\
		switch ((ctxt)->dst.bytes) {				\
		case 2:							\
			____emulate_2op(ctxt,_op,_wx,_wy,"w",u16);	\
			break;						\
		case 4:							\
			____emulate_2op(ctxt,_op,_lx,_ly,"l",u32);	\
			break;						\
		case 8:							\
			ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy)	     \
	do {								     \
		unsigned long _tmp;					     \
		switch ((ctxt)->dst.bytes) {				     \
		case 1:							     \
			____emulate_2op(ctxt,_op,_bx,_by,"b",u8);	     \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(ctxt, _op,			     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(ctxt, _op) \
	__emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(ctxt, _op) \
	__emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(ctxt, _op) \
	__emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
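
/*
 * Illustrative usage (not from the original source): an opcode handler
 * for "and", say, can be written as
 *
 *	emulate_2op_SrcV(ctxt, "and");
 *
 * which picks andb/andw/andl/andq from ctxt->dst.bytes and applies it to
 * ctxt->dst.val and ctxt->src.val with guest flag semantics.
 */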
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(ctxt, _op, _suffix, _type)		\
	do {							\
		unsigned long _tmp;				\
		_type _clv  = (ctxt)->src2.val;			\
		_type _srcv = (ctxt)->src.val;			\
		_type _dstv = (ctxt)->dst.val;			\
								\
		__asm__ __volatile__ (				\
			_PRE_EFLAGS("0", "5", "2")		\
			_op _suffix " %4,%1 \n"			\
			_POST_EFLAGS("0", "5", "2")		\
			: "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);					\
								\
		(ctxt)->src2.val  = (unsigned long) _clv;	\
		(ctxt)->src.val   = (unsigned long) _srcv;	\
		(ctxt)->dst.val   = (unsigned long) _dstv;	\
	} while (0)

#define emulate_2op_cl(ctxt, _op)				\
	do {							\
		switch ((ctxt)->dst.bytes) {			\
		case 2:						\
			__emulate_2op_cl(ctxt, _op, "w", u16);	\
			break;					\
		case 4:						\
			__emulate_2op_cl(ctxt, _op, "l", u32);	\
			break;					\
		case 8:						\
			ON64(__emulate_2op_cl(ctxt, _op, "q", ulong));	\
			break;					\
		}						\
	} while (0)

#define __emulate_1op(ctxt, _op, _suffix)			\
	do {							\
		unsigned long _tmp;				\
								\
		__asm__ __volatile__ (				\
			_PRE_EFLAGS("0", "3", "2")		\
			_op _suffix " %1; "			\
			_POST_EFLAGS("0", "3", "2")		\
			: "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
			  "=&r" (_tmp)				\
			: "i" (EFLAGS_MASK));			\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(ctxt, _op)						\
	do {								\
		switch ((ctxt)->dst.bytes) {				\
		case 1:	__emulate_1op(ctxt, _op, "b"); break;		\
		case 2:	__emulate_1op(ctxt, _op, "w"); break;		\
		case 4:	__emulate_1op(ctxt, _op, "l"); break;		\
		case 8:	ON64(__emulate_1op(ctxt, _op, "q")); break;	\
		}							\
	} while (0)

#define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex)			\
	do {								\
		unsigned long _tmp;					\
		ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX];		\
		ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX];		\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "1")			\
			"1: \n\t"					\
			_op _suffix " %6; "				\
			"2: \n\t"					\
			_POST_EFLAGS("0", "5", "1")			\
			".pushsection .fixup,\"ax\" \n\t"		\
			"3: movb $1, %4 \n\t"				\
			"jmp 2b \n\t"					\
			".popsection \n\t"				\
			_ASM_EXTABLE(1b, 3b)				\
			: "=m" ((ctxt)->eflags), "=&r" (_tmp),		\
			  "+a" (*rax), "+d" (*rdx), "+qm"(_ex)		\
			: "i" (EFLAGS_MASK), "m" ((ctxt)->src.val),	\
			  "a" (*rax), "d" (*rdx));			\
	} while (0)

/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
#define emulate_1op_rax_rdx(ctxt, _op, _ex)				\
	do {								\
		switch((ctxt)->src.bytes) {				\
		case 1:							\
			__emulate_1op_rax_rdx(ctxt, _op, "b", _ex);	\
			break;						\
		case 2:							\
			__emulate_1op_rax_rdx(ctxt, _op, "w", _ex);	\
			break;						\
		case 4:							\
			__emulate_1op_rax_rdx(ctxt, _op, "l", _ex);	\
			break;						\
		case 8: ON64(						\
			__emulate_1op_rax_rdx(ctxt, _op, "q", _ex));	\
			break;						\
		}							\
	} while (0)
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
}
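
/*
 * Illustrative note (not from the original source): with 16-bit
 * addressing, ad_mask() is 0xffff, so register_address_increment() wraps
 * within the low word and leaves the upper bits alone; e.g. a 16-bit SI
 * of 0xffff incremented by 1 becomes 0x0000 rather than carrying into
 * the high half of RSI.
 */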
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
{
	ctxt->has_seg_override = true;
	ctxt->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
{
	if (!ctxt->has_seg_override)
		return 0;

	return ctxt->seg_override;
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
				 bool cs_l)
{
	switch (ctxt->op_bytes) {
	case 2:
		ctxt->_eip = (u16)dst;
		break;
	case 4:
		ctxt->_eip = (u32)dst;
		break;
#ifdef CONFIG_X86_64
	case 8:
		if ((cs_l && is_noncanonical_address(dst)) ||
		    (!cs_l && (dst >> 32) != 0))
			return emulate_gp(ctxt, 0);
		ctxt->_eip = dst;
		break;
#endif
	default:
		WARN(1, "unsupported eip assignment size\n");
	}
	return X86EMUL_CONTINUE;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

static int __linearize(struct x86_emulate_ctxt *ctxt,
		       struct segmented_address addr,
		       unsigned size, bool write, bool fetch,
		       ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl, rpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		break;
	case X86EMUL_MODE_PROT64:
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment or read-only data segment */
		if (((desc.type & 8) || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		}
		cpl = ctxt->ops->cpl(ctxt);
		rpl = sel & 3;
		cpl = max(cpl, rpl);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, addr.seg);
	else
		return emulate_gp(ctxt, addr.seg);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Fetch the next byte of the instruction being emulated which is pointed to
 * by ctxt->_eip, then increment ctxt->_eip.
 *
 * Also prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->fetch;
	int rc;
	int size, cur_size;

	if (ctxt->_eip == fc->end) {
		unsigned long linear;
		struct segmented_address addr = { .seg = VCPU_SREG_CS,
						  .ea  = ctxt->_eip };
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size,
			   PAGE_SIZE - offset_in_page(ctxt->_eip));
		rc = __linearize(ctxt, addr, size, false, true, &linear);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
				      size, &ctxt->exception);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		fc->end += size;
	}
	*dest = fc->data[ctxt->_eip - fc->start];
	ctxt->_eip++;
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_insn_fetch_byte(ctxt, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(_ctxt, &_x, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_type)_x;							\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({	rc = do_insn_fetch(_ctxt, _arr, (_size));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
})
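
/*
 * Illustrative usage (not from the original source): decode code pulls
 * immediates off the instruction stream with, e.g.,
 *
 *	ctxt->modrm = insn_fetch(u8, ctxt);
 *
 * Both macros jump to the enclosing function's "done" label on a failed
 * fetch, so they can only be used where such a label exists.
 */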
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
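
/*
 * Illustrative note (not from the original source): the low nibble of a
 * Jcc/SETcc opcode is the condition code, so for jz (0x74) this is
 * test_cc(4, flags), true when ZF is set, while jnz (0x75) has the low
 * bit set and gets the inverted result.
 */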
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op,
				    int inhibit_bytereg)
{
	unsigned reg = ctxt->modrm_reg;
	int highbyte_regs = ctxt->rex_prefix == 0;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}

	op->type = OP_REG;
	if ((ctxt->d & ByteOp) && !inhibit_bytereg) {
		op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
		op->bytes = 1;
	} else {
		op->addr.reg = decode_register(reg, ctxt->regs, 0);
		op->bytes = ctxt->op_bytes;
	}
	fetch_register_operand(op);
	op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (ctxt->rex_prefix) {
		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */
	}

	ctxt->modrm = insn_fetch(u8, ctxt);
	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt->modrm_rm,
					       ctxt->regs, ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = ctxt->regs[VCPU_REGS_RBX];
		unsigned bp = ctxt->regs[VCPU_REGS_RBP];
		unsigned si = ctxt->regs[VCPU_REGS_RSI];
		unsigned di = ctxt->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else
				modrm_ea += ctxt->regs[base_reg];
			if (index_reg != 4)
				modrm_ea += ctxt->regs[index_reg] << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else
			modrm_ea += ctxt->regs[ctxt->modrm_rm];
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~(ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
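
/*
 * Illustrative note (not from the original source): for "bt %cx,mem"
 * with a 16-bit operand, a bit offset of, say, 20 moves the effective
 * address forward by (20 & ~15) / 8 = 2 bytes and leaves 20 & 15 = 4 as
 * the in-word bit index, matching the hardware's treatment of register
 * bit offsets beyond the operand width.
 */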
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
					      &ctxt->exception);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	struct x86_emulate_ops *ops = ctxt->ops;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset (dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(ctxt, dt);
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}
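
/*
 * Illustrative note (not from the original source): bit 2 of a selector
 * is the table indicator (LDT vs GDT) and the upper 13 bits index the
 * table, so selector 0x2b refers to GDT entry 0x2b >> 3 = 5 with RPL 3,
 * and the limit check above rejects any index whose 8-byte descriptor
 * would extend past dt.size.
 */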
/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, NULL);
}
static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	switch (ctxt->dst.type) {
	case OP_REG:
		write_register_operand(&ctxt->dst);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			rc = segmented_cmpxchg(ctxt,
					       ctxt->dst.addr.mem,
					       &ctxt->dst.orig_val,
					       &ctxt->dst.val,
					       ctxt->dst.bytes);
		else
			rc = segmented_write(ctxt,
					     ctxt->dst.addr.mem,
					     &ctxt->dst.val,
					     ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	struct segmented_address addr;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}
static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
							ctxt->op_bytes);
			--reg;
			continue;
		}
		rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}

static int em_grp1a(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
}

static int em_grp2(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB(ctxt, "rol");
		break;
	case 1:	/* ror */
		emulate_2op_SrcB(ctxt, "ror");
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB(ctxt, "rcl");
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB(ctxt, "rcr");
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB(ctxt, "sal");
		break;
	case 5:	/* shr */
		emulate_2op_SrcB(ctxt, "shr");
		break;
	case 7:	/* sar */
		emulate_2op_SrcB(ctxt, "sar");
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_not(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ~ctxt->dst.val;
	return X86EMUL_CONTINUE;
}

static int em_neg(struct x86_emulate_ctxt *ctxt)
{
	emulate_1op(ctxt, "neg");
	return X86EMUL_CONTINUE;
}

static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 ex = 0;

	emulate_1op_rax_rdx(ctxt, "mul", ex);
	return X86EMUL_CONTINUE;
}

static int em_imul_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 ex = 0;

	emulate_1op_rax_rdx(ctxt, "imul", ex);
	return X86EMUL_CONTINUE;
}

static int em_div_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 de = 0;

	emulate_1op_rax_rdx(ctxt, "div", de);
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_idiv_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 de = 0;

	emulate_1op_rax_rdx(ctxt, "idiv", de);
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->modrm_reg) {
	case 0:	/* inc */
		emulate_1op(ctxt, "inc");
		break;
	case 1:	/* dec */
		emulate_1op(ctxt, "dec");
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = ctxt->_eip;
		rc = assign_eip_near(ctxt, ctxt->src.val);
		if (rc != X86EMUL_CONTINUE)
			break;
		ctxt->src.val = old_eip;
		rc = em_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		rc = assign_eip_near(ctxt, ctxt->src.val);
		break;
	case 5: /* jmp far */
		rc = em_jmp_far(ctxt);
		break;
	case 6:	/* push */
		rc = em_push(ctxt);
		break;
	}
	return rc;
}

static int em_grp9(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
		ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
			(u32) ctxt->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	u16 selector;

	memset(cs, 0, sizeof(struct desc_struct));
	ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	return ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)
		&& ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
		/*
		 * Intel ("GenuineIntel")
		 * remark: Intel CPUs only support "syscall" in 64bit
		 * longmode. Also a 64bit guest with a
		 * 32bit compat-app running will #UD !! While this
		 * behaviour can be fixed (by emulating) into AMD
		 * response - CPUs of AMD can't behave like Intel.
		 */
		if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
		    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
		    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
			return false;

		/* AMD ("AuthenticAMD") */
		if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
		    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
		    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
			return true;

		/* AMD ("AMDisbetter!") */
		if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
		    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
		    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
			return true;
	}

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	ctxt->regs[VCPU_REGS_RSP] = (efer & EFER_LMA) ? msr_data :
							(u32)msr_data;

	return X86EMUL_CONTINUE;
}

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = ctxt->regs[VCPU_REGS_RCX];
	rdx = ctxt->regs[VCPU_REGS_RDX];

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (is_noncanonical_address(rcx) ||
		    is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	ctxt->regs[VCPU_REGS_RSP] = rcx;

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
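
/*
 * Illustrative note (not from the original source): the I/O permission
 * bitmap lives after the 32-bit TSS; its 16-bit offset is stored at TSS
 * byte 102.  For "in %dx" with port 0x3f8 and len 1, the byte at
 * io_bitmap_ptr + 0x3f8/8 is fetched and bit 0x3f8 & 7 = 0 decides
 * whether an access with CPL > IOPL is allowed.
 */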
2224 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2230 if (emulator_bad_iopl(ctxt))
2231 if (!emulator_io_port_access_allowed(ctxt, port, len))
2234 ctxt->perm_ok = true;
2239 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2240 struct tss_segment_16 *tss)
2242 tss->ip = ctxt->_eip;
2243 tss->flag = ctxt->eflags;
2244 tss->ax = ctxt->regs[VCPU_REGS_RAX];
2245 tss->cx = ctxt->regs[VCPU_REGS_RCX];
2246 tss->dx = ctxt->regs[VCPU_REGS_RDX];
2247 tss->bx = ctxt->regs[VCPU_REGS_RBX];
2248 tss->sp = ctxt->regs[VCPU_REGS_RSP];
2249 tss->bp = ctxt->regs[VCPU_REGS_RBP];
2250 tss->si = ctxt->regs[VCPU_REGS_RSI];
2251 tss->di = ctxt->regs[VCPU_REGS_RDI];
2253 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2254 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2255 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2256 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2257 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2260 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2261 struct tss_segment_16 *tss)
2266 ctxt->_eip = tss->ip;
2267 ctxt->eflags = tss->flag | 2;
2268 ctxt->regs[VCPU_REGS_RAX] = tss->ax;
2269 ctxt->regs[VCPU_REGS_RCX] = tss->cx;
2270 ctxt->regs[VCPU_REGS_RDX] = tss->dx;
2271 ctxt->regs[VCPU_REGS_RBX] = tss->bx;
2272 ctxt->regs[VCPU_REGS_RSP] = tss->sp;
2273 ctxt->regs[VCPU_REGS_RBP] = tss->bp;
2274 ctxt->regs[VCPU_REGS_RSI] = tss->si;
2275 ctxt->regs[VCPU_REGS_RDI] = tss->di;
2278 * SDM says that segment selectors are loaded before segment
2281 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2282 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2283 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2284 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2285 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2290 * Now load segment descriptors. If fault happenes at this stage
2291 * it is handled in a context of new task
2293 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2295 if (ret != X86EMUL_CONTINUE)
2297 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2299 if (ret != X86EMUL_CONTINUE)
2301 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2303 if (ret != X86EMUL_CONTINUE)
2305 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2307 if (ret != X86EMUL_CONTINUE)
2309 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2311 if (ret != X86EMUL_CONTINUE)
2314 return X86EMUL_CONTINUE;
2317 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2318 u16 tss_selector, u16 old_tss_sel,
2319 ulong old_tss_base, struct desc_struct *new_desc)
2321 struct x86_emulate_ops *ops = ctxt->ops;
2322 struct tss_segment_16 tss_seg;
2324 u32 new_tss_base = get_desc_base(new_desc);
2326 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2328 if (ret != X86EMUL_CONTINUE)
2329 /* FIXME: need to provide precise fault address */
2332 save_state_to_tss16(ctxt, &tss_seg);
2334 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2336 if (ret != X86EMUL_CONTINUE)
2337 /* FIXME: need to provide precise fault address */
2340 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2342 if (ret != X86EMUL_CONTINUE)
2343 /* FIXME: need to provide precise fault address */
2346 if (old_tss_sel != 0xffff) {
2347 tss_seg.prev_task_link = old_tss_sel;
2349 ret = ops->write_std(ctxt, new_tss_base,
2350 &tss_seg.prev_task_link,
2351 sizeof tss_seg.prev_task_link,
2353 if (ret != X86EMUL_CONTINUE)
2354 /* FIXME: need to provide precise fault address */
2358 return load_state_from_tss16(ctxt, &tss_seg);
2361 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2362 struct tss_segment_32 *tss)
2364 tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
2365 tss->eip = ctxt->_eip;
2366 tss->eflags = ctxt->eflags;
2367 tss->eax = ctxt->regs[VCPU_REGS_RAX];
2368 tss->ecx = ctxt->regs[VCPU_REGS_RCX];
2369 tss->edx = ctxt->regs[VCPU_REGS_RDX];
2370 tss->ebx = ctxt->regs[VCPU_REGS_RBX];
2371 tss->esp = ctxt->regs[VCPU_REGS_RSP];
2372 tss->ebp = ctxt->regs[VCPU_REGS_RBP];
2373 tss->esi = ctxt->regs[VCPU_REGS_RSI];
2374 tss->edi = ctxt->regs[VCPU_REGS_RDI];
2376 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2377 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2378 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2379 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2380 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2381 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2382 tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2385 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2386 struct tss_segment_32 *tss)
2391 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2392 return emulate_gp(ctxt, 0);
2393 ctxt->_eip = tss->eip;
2394 ctxt->eflags = tss->eflags | 2;
2395 ctxt->regs[VCPU_REGS_RAX] = tss->eax;
2396 ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
2397 ctxt->regs[VCPU_REGS_RDX] = tss->edx;
2398 ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
2399 ctxt->regs[VCPU_REGS_RSP] = tss->esp;
2400 ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
2401 ctxt->regs[VCPU_REGS_RSI] = tss->esi;
2402 ctxt->regs[VCPU_REGS_RDI] = tss->edi;
2405 * SDM says that segment selectors are loaded before segment
2406 * descriptors. This is important because CPL checks will
2409 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2410 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2411 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2412 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2413 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2414 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2415 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2420 * Now load segment descriptors. If a fault happens at this stage
2421 * it is handled in the context of the new task.
2422 */
2423 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2424 cpl, true, NULL);
2425 if (ret != X86EMUL_CONTINUE)
2426 return ret;
2427 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2428 true, NULL);
2429 if (ret != X86EMUL_CONTINUE)
2430 return ret;
2431 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2432 true, NULL);
2433 if (ret != X86EMUL_CONTINUE)
2434 return ret;
2435 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2436 true, NULL);
2437 if (ret != X86EMUL_CONTINUE)
2438 return ret;
2439 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2440 true, NULL);
2441 if (ret != X86EMUL_CONTINUE)
2442 return ret;
2443 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2444 true, NULL);
2445 if (ret != X86EMUL_CONTINUE)
2446 return ret;
2447 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2448 true, NULL);
2449 if (ret != X86EMUL_CONTINUE)
2450 return ret;
2452 return X86EMUL_CONTINUE;
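2453 }
2454 /* 32-bit task switch: same flow as task_switch_16 but with the 32-bit TSS layout (which also carries CR3). */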
2455 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2456 u16 tss_selector, u16 old_tss_sel,
2457 ulong old_tss_base, struct desc_struct *new_desc)
2459 struct x86_emulate_ops *ops = ctxt->ops;
2460 struct tss_segment_32 tss_seg;
2461 int ret;
2462 u32 new_tss_base = get_desc_base(new_desc);
2464 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2465 &ctxt->exception);
2466 if (ret != X86EMUL_CONTINUE)
2467 /* FIXME: need to provide precise fault address */
2468 return ret;
2470 save_state_to_tss32(ctxt, &tss_seg);
2472 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2473 &ctxt->exception);
2474 if (ret != X86EMUL_CONTINUE)
2475 /* FIXME: need to provide precise fault address */
2476 return ret;
2478 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2479 &ctxt->exception);
2480 if (ret != X86EMUL_CONTINUE)
2481 /* FIXME: need to provide precise fault address */
2482 return ret;
2484 if (old_tss_sel != 0xffff) {
2485 tss_seg.prev_task_link = old_tss_sel;
2487 ret = ops->write_std(ctxt, new_tss_base,
2488 &tss_seg.prev_task_link,
2489 sizeof tss_seg.prev_task_link,
2490 &ctxt->exception);
2491 if (ret != X86EMUL_CONTINUE)
2492 /* FIXME: need to provide precise fault address */
2493 return ret;
2494 }
2496 return load_state_from_tss32(ctxt, &tss_seg);
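2497 }
2498 /* Common task-switch driver: validates the target TSS descriptor, updates busy/NT state, then defers to the width-specific helper. */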
2499 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2500 u16 tss_selector, int reason,
2501 bool has_error_code, u32 error_code)
2503 struct x86_emulate_ops *ops = ctxt->ops;
2504 struct desc_struct curr_tss_desc, next_tss_desc;
2506 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2507 ulong old_tss_base =
2508 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2511 /* FIXME: old_tss_base == ~0 ? */
2513 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2514 if (ret != X86EMUL_CONTINUE)
2516 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2517 if (ret != X86EMUL_CONTINUE)
2520 /* FIXME: check that next_tss_desc is tss */
2522 if (reason != TASK_SWITCH_IRET) {
2523 if ((tss_selector & 3) > next_tss_desc.dpl ||
2524 ops->cpl(ctxt) > next_tss_desc.dpl)
2525 return emulate_gp(ctxt, 0);
2528 desc_limit = desc_limit_scaled(&next_tss_desc);
2529 if (!next_tss_desc.p ||
2530 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2531 desc_limit < 0x2b)) {
2532 emulate_ts(ctxt, tss_selector & 0xfffc);
2533 return X86EMUL_PROPAGATE_FAULT;
2536 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2537 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2538 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2541 if (reason == TASK_SWITCH_IRET)
2542 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2544 /* set back link to prev task only if NT bit is set in eflags;
2545 note that old_tss_sel is not used after this point */
2546 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2547 old_tss_sel = 0xffff;
2549 if (next_tss_desc.type & 8)
2550 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2551 old_tss_base, &next_tss_desc);
2553 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2554 old_tss_base, &next_tss_desc);
2555 if (ret != X86EMUL_CONTINUE)
2558 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2559 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2561 if (reason != TASK_SWITCH_IRET) {
2562 next_tss_desc.type |= (1 << 1); /* set busy flag */
2563 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2566 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2567 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2569 if (has_error_code) {
2570 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2571 ctxt->lock_prefix = 0;
2572 ctxt->src.val = (unsigned long) error_code;
2573 ret = em_push(ctxt);
2579 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2580 u16 tss_selector, int reason,
2581 bool has_error_code, u32 error_code)
2585 ctxt->_eip = ctxt->eip;
2586 ctxt->dst.type = OP_NONE;
2588 rc = emulator_do_task_switch(ctxt, tss_selector, reason,
2589 has_error_code, error_code);
2591 if (rc == X86EMUL_CONTINUE)
2592 ctxt->eip = ctxt->_eip;
2594 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
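2595 }
2596 /* Advance (or, when EFLAGS.DF is set, rewind) a string operand's index register and recompute its effective address. */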
2597 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2598 int reg, struct operand *op)
2600 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2602 register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
2603 op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
2604 op->addr.mem.seg = seg;
2607 static int em_das(struct x86_emulate_ctxt *ctxt)
2608 {
2609 u8 al, old_al;
2610 bool af, cf, old_cf;
2612 cf = ctxt->eflags & X86_EFLAGS_CF;
2613 al = ctxt->dst.val;
2615 old_al = al;
2616 old_cf = cf;
2617 cf = false;
2618 af = ctxt->eflags & X86_EFLAGS_AF;
2619 if ((al & 0x0f) > 9 || af) {
2620 al -= 6;
2621 cf = old_cf | (al >= 250);
2622 af = true;
2623 }
2626 if (old_al > 0x99 || old_cf) {
2627 al -= 0x60;
2628 cf = true;
2629 }
2631 ctxt->dst.val = al;
2632 /* Set PF, ZF, SF */
2633 ctxt->src.type = OP_IMM;
2634 ctxt->src.val = 0;
2635 ctxt->src.bytes = 1;
2636 emulate_2op_SrcV(ctxt, "or");
2637 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2638 if (cf)
2639 ctxt->eflags |= X86_EFLAGS_CF;
2640 if (af)
2641 ctxt->eflags |= X86_EFLAGS_AF;
2642 return X86EMUL_CONTINUE;
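2643 }
2644 /* CALL rel: src.val is loaded with the post-instruction _eip (the return address) before the relative jump, then pushed. */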
2645 static int em_call(struct x86_emulate_ctxt *ctxt)
2646 {
2647 int rc;
2648 long rel = ctxt->src.val;
2650 ctxt->src.val = (unsigned long)ctxt->_eip;
2651 rc = jmp_rel(ctxt, rel);
2652 if (rc != X86EMUL_CONTINUE)
2653 return rc;
2654 return em_push(ctxt);
2657 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2658 {
2659 u16 sel, old_cs;
2660 ulong old_eip;
2661 int rc;
2662 struct desc_struct old_desc, new_desc;
2663 const struct x86_emulate_ops *ops = ctxt->ops;
2664 int cpl = ctxt->ops->cpl(ctxt);
2666 old_eip = ctxt->_eip;
2667 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
2669 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2670 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2671 false, &new_desc);
2672 if (rc != X86EMUL_CONTINUE)
2673 return X86EMUL_CONTINUE;
2675 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2676 if (rc != X86EMUL_CONTINUE)
2679 ctxt->src.val = old_cs;
2680 rc = em_push(ctxt);
2681 if (rc != X86EMUL_CONTINUE)
2682 goto fail;
2684 ctxt->src.val = old_eip;
2685 rc = em_push(ctxt);
2686 /* If we failed, we tainted the memory, but at the very least we should
2687 restore cs */
2688 if (rc != X86EMUL_CONTINUE)
2689 goto fail;
2690 return rc;
2691 fail:
2692 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2693 return rc;
2697 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2698 {
2699 int rc;
2700 unsigned long eip;
2702 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2703 if (rc != X86EMUL_CONTINUE)
2704 return rc;
2705 rc = assign_eip_near(ctxt, eip);
2706 if (rc != X86EMUL_CONTINUE)
2707 return rc;
2708 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
2709 return X86EMUL_CONTINUE;
2712 static int em_add(struct x86_emulate_ctxt *ctxt)
2714 emulate_2op_SrcV(ctxt, "add");
2715 return X86EMUL_CONTINUE;
2718 static int em_or(struct x86_emulate_ctxt *ctxt)
2720 emulate_2op_SrcV(ctxt, "or");
2721 return X86EMUL_CONTINUE;
2724 static int em_adc(struct x86_emulate_ctxt *ctxt)
2726 emulate_2op_SrcV(ctxt, "adc");
2727 return X86EMUL_CONTINUE;
2730 static int em_sbb(struct x86_emulate_ctxt *ctxt)
2732 emulate_2op_SrcV(ctxt, "sbb");
2733 return X86EMUL_CONTINUE;
2736 static int em_and(struct x86_emulate_ctxt *ctxt)
2738 emulate_2op_SrcV(ctxt, "and");
2739 return X86EMUL_CONTINUE;
2742 static int em_sub(struct x86_emulate_ctxt *ctxt)
2744 emulate_2op_SrcV(ctxt, "sub");
2745 return X86EMUL_CONTINUE;
2748 static int em_xor(struct x86_emulate_ctxt *ctxt)
2750 emulate_2op_SrcV(ctxt, "xor");
2751 return X86EMUL_CONTINUE;
2754 static int em_cmp(struct x86_emulate_ctxt *ctxt)
2756 emulate_2op_SrcV(ctxt, "cmp");
2757 /* Disable writeback. */
2758 ctxt->dst.type = OP_NONE;
2759 return X86EMUL_CONTINUE;
2762 static int em_test(struct x86_emulate_ctxt *ctxt)
2764 emulate_2op_SrcV(ctxt, "test");
2765 /* Disable writeback. */
2766 ctxt->dst.type = OP_NONE;
2767 return X86EMUL_CONTINUE;
2770 static int em_xchg(struct x86_emulate_ctxt *ctxt)
2772 /* Write back the register source. */
2773 ctxt->src.val = ctxt->dst.val;
2774 write_register_operand(&ctxt->src);
2776 /* Write back the memory destination with implicit LOCK prefix. */
2777 ctxt->dst.val = ctxt->src.orig_val;
2778 ctxt->lock_prefix = 1;
2779 return X86EMUL_CONTINUE;
2782 static int em_imul(struct x86_emulate_ctxt *ctxt)
2784 emulate_2op_SrcV_nobyte(ctxt, "imul");
2785 return X86EMUL_CONTINUE;
2788 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2790 ctxt->dst.val = ctxt->src2.val;
2791 return em_imul(ctxt);
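2792 }
2793 /* CWD/CDQ/CQO: ~((val >> (bits - 1)) - 1) is all-ones when the accumulator is negative and zero otherwise; that sign mask goes to (r/e)DX. */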
2794 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2796 ctxt->dst.type = OP_REG;
2797 ctxt->dst.bytes = ctxt->src.bytes;
2798 ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
2799 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2801 return X86EMUL_CONTINUE;
2804 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2805 {
2806 u64 tsc = 0;
2808 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2809 ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
2810 ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
2811 return X86EMUL_CONTINUE;
2814 static int em_mov(struct x86_emulate_ctxt *ctxt)
2816 ctxt->dst.val = ctxt->src.val;
2817 return X86EMUL_CONTINUE;
2820 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
2822 if (ctxt->modrm_reg > VCPU_SREG_GS)
2823 return emulate_ud(ctxt);
2825 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
2826 return X86EMUL_CONTINUE;
2829 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
2831 u16 sel = ctxt->src.val;
2833 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
2834 return emulate_ud(ctxt);
2836 if (ctxt->modrm_reg == VCPU_SREG_SS)
2837 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2839 /* Disable writeback. */
2840 ctxt->dst.type = OP_NONE;
2841 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
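2842 }
2843 /* MOVDQU: bulk copy of the SSE operand; op_bytes is 16 for these encodings. */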
2844 static int em_movdqu(struct x86_emulate_ctxt *ctxt)
2846 memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
2847 return X86EMUL_CONTINUE;
2850 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
2851 {
2852 int rc;
2853 ulong linear;
2855 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
2856 if (rc == X86EMUL_CONTINUE)
2857 ctxt->ops->invlpg(ctxt, linear);
2858 /* Disable writeback. */
2859 ctxt->dst.type = OP_NONE;
2860 return X86EMUL_CONTINUE;
2863 static int em_clts(struct x86_emulate_ctxt *ctxt)
2864 {
2865 u64 cr0;
2867 cr0 = ctxt->ops->get_cr(ctxt, 0);
2868 cr0 &= ~X86_CR0_TS;
2869 ctxt->ops->set_cr(ctxt, 0, cr0);
2870 return X86EMUL_CONTINUE;
2873 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
2877 if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
2878 return X86EMUL_UNHANDLEABLE;
2880 rc = ctxt->ops->fix_hypercall(ctxt);
2881 if (rc != X86EMUL_CONTINUE)
2884 /* Let the processor re-execute the fixed hypercall */
2885 ctxt->_eip = ctxt->eip;
2886 /* Disable writeback. */
2887 ctxt->dst.type = OP_NONE;
2888 return X86EMUL_CONTINUE;
2891 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
2893 struct desc_ptr desc_ptr;
2896 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2897 &desc_ptr.size, &desc_ptr.address,
2898 ctxt->op_bytes);
2899 if (rc != X86EMUL_CONTINUE)
2900 return rc;
2901 ctxt->ops->set_gdt(ctxt, &desc_ptr);
2902 /* Disable writeback. */
2903 ctxt->dst.type = OP_NONE;
2904 return X86EMUL_CONTINUE;
2907 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
2908 {
2909 int rc;
2911 rc = ctxt->ops->fix_hypercall(ctxt);
2913 /* Disable writeback. */
2914 ctxt->dst.type = OP_NONE;
2915 return rc;
2918 static int em_lidt(struct x86_emulate_ctxt *ctxt)
2920 struct desc_ptr desc_ptr;
2923 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2924 &desc_ptr.size, &desc_ptr.address,
2925 ctxt->op_bytes);
2926 if (rc != X86EMUL_CONTINUE)
2927 return rc;
2928 ctxt->ops->set_idt(ctxt, &desc_ptr);
2929 /* Disable writeback. */
2930 ctxt->dst.type = OP_NONE;
2931 return X86EMUL_CONTINUE;
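2932 }
2933 /* SMSW stores only the low 16 bits of CR0; LMSW below can set CR0.PE but never clear it. */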
2934 static int em_smsw(struct x86_emulate_ctxt *ctxt)
2936 ctxt->dst.bytes = 2;
2937 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
2938 return X86EMUL_CONTINUE;
2941 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
2943 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
2944 | (ctxt->src.val & 0x0f));
2945 ctxt->dst.type = OP_NONE;
2946 return X86EMUL_CONTINUE;
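2947 }
2948 /* LOOP/LOOPE/LOOPNE: decrement (r/e)CX and branch while non-zero; 0xe2 (plain LOOP) skips the ZF test that test_cc applies for the others. */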
2949 static int em_loop(struct x86_emulate_ctxt *ctxt)
2951 int rc = X86EMUL_CONTINUE;
2953 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
2954 if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
2955 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
2956 rc = jmp_rel(ctxt, ctxt->src.val);
2958 return rc;
2959 }
2961 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
2963 int rc = X86EMUL_CONTINUE;
2965 if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
2966 rc = jmp_rel(ctxt, ctxt->src.val);
2968 return rc;
2969 }
2971 static int em_cli(struct x86_emulate_ctxt *ctxt)
2973 if (emulator_bad_iopl(ctxt))
2974 return emulate_gp(ctxt, 0);
2976 ctxt->eflags &= ~X86_EFLAGS_IF;
2977 return X86EMUL_CONTINUE;
2980 static int em_sti(struct x86_emulate_ctxt *ctxt)
2982 if (emulator_bad_iopl(ctxt))
2983 return emulate_gp(ctxt, 0);
2985 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
2986 ctxt->eflags |= X86_EFLAGS_IF;
2987 return X86EMUL_CONTINUE;
2990 static bool valid_cr(int nr)
2991 {
2992 switch (nr) {
2993 case 0:
2994 case 2 ... 4:
2995 case 8:
2996 return true;
2997 default:
2998 return false;
2999 }
3000 }
3002 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3004 if (!valid_cr(ctxt->modrm_reg))
3005 return emulate_ud(ctxt);
3007 return X86EMUL_CONTINUE;
3010 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3012 u64 new_val = ctxt->src.val64;
3013 int cr = ctxt->modrm_reg;
3014 u64 efer = 0;
3016 static u64 cr_reserved_bits[] = {
3017 0xffffffff00000000ULL,
3018 0, 0, 0, /* CR3 checked later */
3019 CR4_RESERVED_BITS,
3020 0, 0, 0,
3021 CR8_RESERVED_BITS,
3022 };
3024 if (!valid_cr(cr))
3025 return emulate_ud(ctxt);
3027 if (new_val & cr_reserved_bits[cr])
3028 return emulate_gp(ctxt, 0);
3030 switch (cr) {
3031 case 0: {
3032 u64 cr4;
3033 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3034 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3035 return emulate_gp(ctxt, 0);
3037 cr4 = ctxt->ops->get_cr(ctxt, 4);
3038 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3040 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3041 !(cr4 & X86_CR4_PAE))
3042 return emulate_gp(ctxt, 0);
3044 break;
3045 }
3046 case 3: {
3047 u64 rsvd = 0;
3049 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3050 if (efer & EFER_LMA)
3051 rsvd = CR3_L_MODE_RESERVED_BITS;
3052 else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
3053 rsvd = CR3_PAE_RESERVED_BITS;
3054 else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
3055 rsvd = CR3_NONPAE_RESERVED_BITS;
3057 if (new_val & rsvd)
3058 return emulate_gp(ctxt, 0);
3060 break;
3061 }
3062 case 4: {
3063 u64 cr4;
3065 cr4 = ctxt->ops->get_cr(ctxt, 4);
3066 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3068 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3069 return emulate_gp(ctxt, 0);
3071 break;
3072 }
3073 }
3075 return X86EMUL_CONTINUE;
3078 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3082 ctxt->ops->get_dr(ctxt, 7, &dr7);
3084 /* Check if DR7.Global_Enable is set */
3085 return dr7 & (1 << 13);
3088 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3089 {
3090 int dr = ctxt->modrm_reg;
3091 u64 cr4;
3093 if (dr > 7)
3094 return emulate_ud(ctxt);
3096 cr4 = ctxt->ops->get_cr(ctxt, 4);
3097 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3098 return emulate_ud(ctxt);
3100 if (check_dr7_gd(ctxt))
3101 return emulate_db(ctxt);
3103 return X86EMUL_CONTINUE;
3106 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3108 u64 new_val = ctxt->src.val64;
3109 int dr = ctxt->modrm_reg;
3111 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3112 return emulate_gp(ctxt, 0);
3114 return check_dr_read(ctxt);
3117 static int check_svme(struct x86_emulate_ctxt *ctxt)
3118 {
3119 u64 efer;
3121 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3123 if (!(efer & EFER_SVME))
3124 return emulate_ud(ctxt);
3126 return X86EMUL_CONTINUE;
3129 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3131 u64 rax = ctxt->regs[VCPU_REGS_RAX];
3133 /* Valid physical address? */
3134 if (rax & 0xffff000000000000ULL)
3135 return emulate_gp(ctxt, 0);
3137 return check_svme(ctxt);
3140 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3142 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3144 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3145 return emulate_ud(ctxt);
3147 return X86EMUL_CONTINUE;
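3148 }
3149 /* RDPMC at CPL > 0 requires CR4.PCE; the counter index in (r/e)CX must also be valid. */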
3150 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3152 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3153 u64 rcx = ctxt->regs[VCPU_REGS_RCX];
3155 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3156 (rcx > 3))
3157 return emulate_gp(ctxt, 0);
3159 return X86EMUL_CONTINUE;
3162 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3164 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3165 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3166 return emulate_gp(ctxt, 0);
3168 return X86EMUL_CONTINUE;
3171 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3173 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3174 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3175 return emulate_gp(ctxt, 0);
3177 return X86EMUL_CONTINUE;
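3178 }
3179 /* Decode-table building blocks: D() sets flags only, I() adds an ->execute handler, DI()/II() attach an intercept, the *P variants add a permission check, and G/GD/GP dispatch through group tables. */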
3180 #define D(_y) { .flags = (_y) }
3181 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
3182 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
3183 .check_perm = (_p) }
3185 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3186 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
3187 #define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
3188 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3189 #define II(_f, _e, _i) \
3190 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
3191 #define IIP(_f, _e, _i, _p) \
3192 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
3193 .check_perm = (_p) }
3194 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3196 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3197 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3198 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3200 #define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3201 I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3202 I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
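3203 /* I6ALU covers the six classic encodings of an ALU opcode family: r/m,r and r,r/m (byte and full-size each) plus accumulator,immediate. */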
3204 static struct opcode group7_rm1[] = {
3205 DI(SrcNone | ModRM | Priv, monitor),
3206 DI(SrcNone | ModRM | Priv, mwait),
3207 N, N, N, N, N, N,
3208 };
3210 static struct opcode group7_rm3[] = {
3211 DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
3212 II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
3213 DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
3214 DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
3215 DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
3216 DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
3217 DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
3218 DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
3221 static struct opcode group7_rm7[] = {
3223 DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
3227 static struct opcode group1[] = {
3228 I(Lock, em_add),
3229 I(Lock, em_or),
3230 I(Lock, em_adc),
3231 I(Lock, em_sbb),
3232 I(Lock, em_and),
3233 I(Lock, em_sub),
3234 I(Lock, em_xor),
3235 I(0, em_cmp),
3236 };
3238 static struct opcode group1A[] = {
3239 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
3242 static struct opcode group3[] = {
3243 I(DstMem | SrcImm | ModRM, em_test),
3244 I(DstMem | SrcImm | ModRM, em_test),
3245 I(DstMem | SrcNone | ModRM | Lock, em_not),
3246 I(DstMem | SrcNone | ModRM | Lock, em_neg),
3247 I(SrcMem | ModRM, em_mul_ex),
3248 I(SrcMem | ModRM, em_imul_ex),
3249 I(SrcMem | ModRM, em_div_ex),
3250 I(SrcMem | ModRM, em_idiv_ex),
3253 static struct opcode group4[] = {
3254 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
3258 static struct opcode group5[] = {
3259 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
3260 D(SrcMem | ModRM | Stack),
3261 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
3262 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
3263 D(SrcMem | ModRM | Stack), N,
3266 static struct opcode group6[] = {
3267 DI(ModRM | Prot, sldt),
3268 DI(ModRM | Prot, str),
3269 DI(ModRM | Prot | Priv, lldt),
3270 DI(ModRM | Prot | Priv, ltr),
3274 static struct group_dual group7 = { {
3275 DI(ModRM | Mov | DstMem | Priv, sgdt),
3276 DI(ModRM | Mov | DstMem | Priv, sidt),
3277 II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
3278 II(ModRM | SrcMem | Priv, em_lidt, lidt),
3279 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3280 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
3281 II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3283 I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
3285 N, EXT(0, group7_rm3),
3286 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3287 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
3290 static struct opcode group8[] = {
3292 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
3293 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
3296 static struct group_dual group9 = { {
3297 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
3299 N, N, N, N, N, N, N, N,
3302 static struct opcode group11[] = {
3303 I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
3306 static struct gprefix pfx_0f_6f_0f_7f = {
3307 N, N, N, I(Sse, em_movdqu),
3310 static struct opcode opcode_table[256] = {
3312 I6ALU(Lock, em_add),
3313 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3314 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3317 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3320 I6ALU(Lock, em_adc),
3321 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3322 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3324 I6ALU(Lock, em_sbb),
3325 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3326 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3328 I6ALU(Lock, em_and), N, N,
3330 I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3332 I6ALU(Lock, em_xor), N, N,
3334 I6ALU(0, em_cmp), N, N,
3338 X8(I(SrcReg | Stack, em_push)),
3340 X8(I(DstReg | Stack, em_pop)),
3342 I(ImplicitOps | Stack | No64, em_pusha),
3343 I(ImplicitOps | Stack | No64, em_popa),
3344 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3347 I(SrcImm | Mov | Stack, em_push),
3348 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3349 I(SrcImmByte | Mov | Stack, em_push),
3350 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3351 D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
3352 D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
3356 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
3357 G(DstMem | SrcImm | ModRM | Group, group1),
3358 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
3359 G(DstMem | SrcImmByte | ModRM | Group, group1),
3360 I2bv(DstMem | SrcReg | ModRM, em_test),
3361 I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
3363 I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
3364 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3365 I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
3366 D(ModRM | SrcMem | NoAccess | DstReg),
3367 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3370 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3372 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3373 I(SrcImmFAddr | No64, em_call_far), N,
3374 II(ImplicitOps | Stack, em_pushf, pushf),
3375 II(ImplicitOps | Stack, em_popf, popf), N, N,
3377 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3378 I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
3379 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3380 I2bv(SrcSI | DstDI | String, em_cmp),
3382 I2bv(DstAcc | SrcImm, em_test),
3383 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3384 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3385 I2bv(SrcAcc | DstDI | String, em_cmp),
3387 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3389 X8(I(DstReg | SrcImm | Mov, em_mov)),
3391 D2bv(DstMem | SrcImmByte | ModRM),
3392 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3393 I(ImplicitOps | Stack, em_ret),
3394 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
3395 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
3396 G(ByteOp, group11), G(0, group11),
3398 N, N, N, I(ImplicitOps | Stack, em_ret_far),
3399 D(ImplicitOps), DI(SrcImmByte, intn),
3400 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3402 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
3405 N, N, N, N, N, N, N, N,
3407 X3(I(SrcImmByte, em_loop)),
3408 I(SrcImmByte, em_jcxz),
3409 D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in),
3410 D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
3412 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
3413 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
3414 D2bvIP(SrcDX | DstAcc, in, check_perm_in),
3415 D2bvIP(SrcAcc | DstDX, out, check_perm_out),
3417 N, DI(ImplicitOps, icebp), N, N,
3418 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3419 G(ByteOp, group3), G(0, group3),
3421 D(ImplicitOps), D(ImplicitOps),
3422 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
3423 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3426 static struct opcode twobyte_table[256] = {
3428 G(0, group6), GD(0, &group7), N, N,
3429 N, I(ImplicitOps | VendorSpecific, em_syscall),
3430 II(ImplicitOps | Priv, em_clts, clts), N,
3431 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3432 N, D(ImplicitOps | ModRM), N, N,
3434 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
3436 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3437 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3438 DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
3439 DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
3441 N, N, N, N, N, N, N, N,
3443 DI(ImplicitOps | Priv, wrmsr),
3444 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3445 DI(ImplicitOps | Priv, rdmsr),
3446 DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
3447 I(ImplicitOps | VendorSpecific, em_sysenter),
3448 I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
3450 N, N, N, N, N, N, N, N,
3452 X16(D(DstReg | SrcMem | ModRM | Mov)),
3454 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3459 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3464 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3468 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
3470 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
3471 DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
3472 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3473 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
3475 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
3476 DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3477 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3478 D(DstMem | SrcReg | Src2CL | ModRM),
3479 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
3481 D2bv(DstMem | SrcReg | ModRM | Lock),
3482 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
3483 D(DstMem | SrcReg | ModRM | BitOp | Lock),
3484 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
3485 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
3486 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3489 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3490 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
3491 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3493 D2bv(DstMem | SrcReg | ModRM | Lock),
3494 N, D(DstMem | SrcReg | ModRM | Mov),
3495 N, N, N, GD(0, &group9),
3496 N, N, N, N, N, N, N, N,
3498 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3500 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3502 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3518 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
3519 {
3520 unsigned size;
3522 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3523 if (size == 8)
3524 size = 4; /* 64-bit ops take a 4-byte, sign-extended immediate */
3525 return size;
3526 }
3528 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3529 unsigned size, bool sign_extension)
3531 int rc = X86EMUL_CONTINUE;
3533 op->type = OP_IMM;
3534 op->bytes = size;
3535 op->addr.mem.ea = ctxt->_eip;
3536 /* NB. Immediates are sign-extended as necessary. */
3537 switch (op->bytes) {
3538 case 1:
3539 op->val = insn_fetch(s8, ctxt);
3540 break;
3541 case 2:
3542 op->val = insn_fetch(s16, ctxt);
3543 break;
3544 case 4:
3545 op->val = insn_fetch(s32, ctxt);
3546 break;
3547 }
3548 if (!sign_extension) {
3549 switch (op->bytes) {
3557 op->val &= 0xffffffff;
3565 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
3568 int rc = X86EMUL_CONTINUE;
3572 decode_register_operand(ctxt, op,
3574 ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7));
3577 rc = decode_imm(ctxt, op, 1, false);
3580 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3584 if ((ctxt->d & BitOp) && op == &ctxt->dst)
3585 fetch_bit_operand(ctxt);
3586 op->orig_val = op->val;
3589 ctxt->memop.bytes = 8;
3593 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3594 op->addr.reg = &ctxt->regs[VCPU_REGS_RAX];
3595 fetch_register_operand(op);
3596 op->orig_val = op->val;
3600 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3602 register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
3603 op->addr.mem.seg = VCPU_SREG_ES;
3609 op->addr.reg = &ctxt->regs[VCPU_REGS_RDX];
3610 fetch_register_operand(op);
3614 op->val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
3617 rc = decode_imm(ctxt, op, 1, true);
3624 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
3627 ctxt->memop.bytes = 2;
3630 ctxt->memop.bytes = 4;
3633 rc = decode_imm(ctxt, op, 2, false);
3636 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
3640 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3642 register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
3643 op->addr.mem.seg = seg_override(ctxt);
3648 op->addr.mem.ea = ctxt->_eip;
3649 op->bytes = ctxt->op_bytes + 2;
3650 insn_fetch_arr(op->valptr, op->bytes, ctxt);
3653 ctxt->memop.bytes = ctxt->op_bytes + 2;
3656 op->val = VCPU_SREG_ES;
3659 op->val = VCPU_SREG_CS;
3662 op->val = VCPU_SREG_SS;
3665 op->val = VCPU_SREG_DS;
3668 op->val = VCPU_SREG_FS;
3671 op->val = VCPU_SREG_GS;
3674 /* Special instructions do their own operand decoding. */
3675 default:
3676 op->type = OP_NONE; /* Disable writeback. */
3677 break;
3678 }
3681 return rc;
3682 }
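3683 /* Top-level decoder: consume legacy/REX prefixes and opcode bytes, resolve group, group-dual and SIMD-prefix tables, then decode the src, src2 and dst operands. */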
3684 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
3686 int rc = X86EMUL_CONTINUE;
3687 int mode = ctxt->mode;
3688 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
3689 bool op_prefix = false;
3690 struct opcode opcode;
3692 ctxt->memop.type = OP_NONE;
3693 ctxt->memopp = NULL;
3694 ctxt->_eip = ctxt->eip;
3695 ctxt->fetch.start = ctxt->_eip;
3696 ctxt->fetch.end = ctxt->fetch.start + insn_len;
3698 memcpy(ctxt->fetch.data, insn, insn_len);
3701 case X86EMUL_MODE_REAL:
3702 case X86EMUL_MODE_VM86:
3703 case X86EMUL_MODE_PROT16:
3704 def_op_bytes = def_ad_bytes = 2;
3706 case X86EMUL_MODE_PROT32:
3707 def_op_bytes = def_ad_bytes = 4;
3709 #ifdef CONFIG_X86_64
3710 case X86EMUL_MODE_PROT64:
3711 def_op_bytes = 4;
3712 def_ad_bytes = 8;
3713 break;
3714 #endif
3715 default:
3716 return EMULATION_FAILED;
3717 }
3719 ctxt->op_bytes = def_op_bytes;
3720 ctxt->ad_bytes = def_ad_bytes;
3722 /* Legacy prefixes. */
3723 for (;;) {
3724 switch (ctxt->b = insn_fetch(u8, ctxt)) {
3725 case 0x66: /* operand-size override */
3726 op_prefix = true;
3727 /* switch between 2/4 bytes */
3728 ctxt->op_bytes = def_op_bytes ^ 6;
3729 break;
3730 case 0x67: /* address-size override */
3731 if (mode == X86EMUL_MODE_PROT64)
3732 /* switch between 4/8 bytes */
3733 ctxt->ad_bytes = def_ad_bytes ^ 12;
3734 else
3735 /* switch between 2/4 bytes */
3736 ctxt->ad_bytes = def_ad_bytes ^ 6;
3737 break;
3738 case 0x26: /* ES override */
3739 case 0x2e: /* CS override */
3740 case 0x36: /* SS override */
3741 case 0x3e: /* DS override */
3742 set_seg_override(ctxt, (ctxt->b >> 3) & 3);
3743 break;
3744 case 0x64: /* FS override */
3745 case 0x65: /* GS override */
3746 set_seg_override(ctxt, ctxt->b & 7);
3747 break;
3748 case 0x40 ... 0x4f: /* REX */
3749 if (mode != X86EMUL_MODE_PROT64)
3750 goto done_prefixes;
3751 ctxt->rex_prefix = ctxt->b;
3752 continue;
3753 case 0xf0: /* LOCK */
3754 ctxt->lock_prefix = 1;
3755 break;
3756 case 0xf2: /* REPNE/REPNZ */
3757 case 0xf3: /* REP/REPE/REPZ */
3758 ctxt->rep_prefix = ctxt->b;
3759 break;
3760 default:
3761 goto done_prefixes;
3762 }
3764 /* Any legacy prefix after a REX prefix nullifies its effect. */
3766 ctxt->rex_prefix = 0;
3767 }
3769 done_prefixes:
3771 /* REX prefix. */
3772 if (ctxt->rex_prefix & 8)
3773 ctxt->op_bytes = 8; /* REX.W */
3775 /* Opcode byte(s). */
3776 opcode = opcode_table[ctxt->b];
3777 /* Two-byte opcode? */
3778 if (ctxt->b == 0x0f) {
3779 ctxt->twobyte = 1;
3780 ctxt->b = insn_fetch(u8, ctxt);
3781 opcode = twobyte_table[ctxt->b];
3782 }
3783 ctxt->d = opcode.flags;
3785 while (ctxt->d & GroupMask) {
3786 switch (ctxt->d & GroupMask) {
3787 case Group:
3788 ctxt->modrm = insn_fetch(u8, ctxt);
3789 --ctxt->_eip;
3790 goffset = (ctxt->modrm >> 3) & 7;
3791 opcode = opcode.u.group[goffset];
3792 break;
3793 case GroupDual:
3794 ctxt->modrm = insn_fetch(u8, ctxt);
3795 --ctxt->_eip;
3796 goffset = (ctxt->modrm >> 3) & 7;
3797 if ((ctxt->modrm >> 6) == 3)
3798 opcode = opcode.u.gdual->mod3[goffset];
3799 else
3800 opcode = opcode.u.gdual->mod012[goffset];
3801 break;
3802 case RMExt:
3803 goffset = ctxt->modrm & 7;
3804 opcode = opcode.u.group[goffset];
3805 break;
3806 case Prefix:
3807 if (ctxt->rep_prefix && op_prefix)
3808 return EMULATION_FAILED;
3809 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
3810 switch (simd_prefix) {
3811 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
3812 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
3813 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
3814 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
3815 }
3816 break;
3817 default:
3818 return EMULATION_FAILED;
3819 }
3821 ctxt->d &= ~(u64)GroupMask;
3822 ctxt->d |= opcode.flags;
3825 ctxt->execute = opcode.u.execute;
3826 ctxt->check_perm = opcode.check_perm;
3827 ctxt->intercept = opcode.intercept;
3830 if (ctxt->d == 0 || (ctxt->d & Undefined))
3831 return EMULATION_FAILED;
3833 if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
3834 return EMULATION_FAILED;
3836 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
3837 ctxt->op_bytes = 8;
3839 if (ctxt->d & Op3264) {
3840 if (mode == X86EMUL_MODE_PROT64)
3841 ctxt->op_bytes = 8;
3842 else
3843 ctxt->op_bytes = 4;
3844 }
3846 if (ctxt->d & Sse)
3847 ctxt->op_bytes = 16;
3849 /* ModRM and SIB bytes. */
3850 if (ctxt->d & ModRM) {
3851 rc = decode_modrm(ctxt, &ctxt->memop);
3852 if (!ctxt->has_seg_override)
3853 set_seg_override(ctxt, ctxt->modrm_seg);
3854 } else if (ctxt->d & MemAbs)
3855 rc = decode_abs(ctxt, &ctxt->memop);
3856 if (rc != X86EMUL_CONTINUE)
3857 goto done;
3859 if (!ctxt->has_seg_override)
3860 set_seg_override(ctxt, VCPU_SREG_DS);
3862 ctxt->memop.addr.mem.seg = seg_override(ctxt);
3864 if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
3865 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
3868 * Decode and fetch the source operand: register, memory
3871 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
3872 if (rc != X86EMUL_CONTINUE)
3873 goto done;
3876 * Decode and fetch the second source operand: register, memory
3879 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
3880 if (rc != X86EMUL_CONTINUE)
3881 goto done;
3883 /* Decode and fetch the destination operand: register or memory. */
3884 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
3886 done:
3887 if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
3888 ctxt->memopp->addr.mem.ea += ctxt->_eip;
3890 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
3893 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3895 /* The second termination condition applies only to REPE
3896 * and REPNE. Test whether the repeat string operation prefix is
3897 * REPE/REPZ or REPNE/REPNZ and, if it is, test the
3898 * corresponding termination condition according to:
3899 * - if REPE/REPZ and ZF = 0 then done
3900 * - if REPNE/REPNZ and ZF = 1 then done
3901 */
3902 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
3903 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
3904 && (((ctxt->rep_prefix == REPE_PREFIX) &&
3905 ((ctxt->eflags & EFLG_ZF) == 0))
3906 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
3907 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
3908 return true;
3910 return false;
3911 }
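3912 /* Main emulation entry: run permission and intercept checks, fetch memory operands, dispatch to ->execute or the opcode switch, then write results back. */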
3913 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3915 struct x86_emulate_ops *ops = ctxt->ops;
3917 int rc = X86EMUL_CONTINUE;
3918 int saved_dst_type = ctxt->dst.type;
3920 ctxt->mem_read.pos = 0;
3922 if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
3923 rc = emulate_ud(ctxt);
3924 goto done;
3925 }
3927 /* LOCK prefix is allowed only with some instructions */
3928 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
3929 rc = emulate_ud(ctxt);
3930 goto done;
3931 }
3933 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
3934 rc = emulate_ud(ctxt);
3935 goto done;
3936 }
3938 if ((ctxt->d & Sse)
3939 && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
3940 || !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
3941 rc = emulate_ud(ctxt);
3942 goto done;
3943 }
3945 if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
3946 rc = emulate_nm(ctxt);
3947 goto done;
3948 }
3950 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3951 rc = emulator_check_intercept(ctxt, ctxt->intercept,
3952 X86_ICPT_PRE_EXCEPT);
3953 if (rc != X86EMUL_CONTINUE)
3954 goto done;
3957 /* Privileged instructions can be executed only at CPL 0 */
3958 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
3959 rc = emulate_gp(ctxt, 0);
3960 goto done;
3961 }
3963 /* Instruction can only be executed in protected mode */
3964 if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
3965 rc = emulate_ud(ctxt);
3966 goto done;
3967 }
3969 /* Do instruction specific permission checks */
3970 if (ctxt->check_perm) {
3971 rc = ctxt->check_perm(ctxt);
3972 if (rc != X86EMUL_CONTINUE)
3973 goto done;
3976 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3977 rc = emulator_check_intercept(ctxt, ctxt->intercept,
3978 X86_ICPT_POST_EXCEPT);
3979 if (rc != X86EMUL_CONTINUE)
3980 goto done;
3983 if (ctxt->rep_prefix && (ctxt->d & String)) {
3984 /* All REP prefixes have the same first termination condition */
3985 if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
3986 ctxt->eip = ctxt->_eip;
3987 goto done;
3988 }
3989 }
3991 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
3992 rc = segmented_read(ctxt, ctxt->src.addr.mem,
3993 ctxt->src.valptr, ctxt->src.bytes);
3994 if (rc != X86EMUL_CONTINUE)
3995 goto done;
3996 ctxt->src.orig_val64 = ctxt->src.val64;
3999 if (ctxt->src2.type == OP_MEM) {
4000 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4001 &ctxt->src2.val, ctxt->src2.bytes);
4002 if (rc != X86EMUL_CONTINUE)
4003 goto done;
4006 if ((ctxt->d & DstMask) == ImplicitOps)
4010 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
4011 /* optimisation - avoid slow emulated read if Mov */
4012 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4013 &ctxt->dst.val, ctxt->dst.bytes);
4014 if (rc != X86EMUL_CONTINUE)
4015 goto done;
4017 /* Copy full 64-bit value for CMPXCHG8B. */
4018 ctxt->dst.orig_val64 = ctxt->dst.val64;
4022 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
4023 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4024 X86_ICPT_POST_MEMACCESS);
4025 if (rc != X86EMUL_CONTINUE)
4026 goto done;
4029 if (ctxt->execute) {
4030 rc = ctxt->execute(ctxt);
4031 if (rc != X86EMUL_CONTINUE)
4032 goto done;
4033 goto writeback;
4034 }
4036 if (ctxt->twobyte)
4037 goto twobyte_insn;
4039 switch (ctxt->b) {
4040 case 0x40 ... 0x47: /* inc r16/r32 */
4041 emulate_1op(ctxt, "inc");
4043 case 0x48 ... 0x4f: /* dec r16/r32 */
4044 emulate_1op(ctxt, "dec");
4046 case 0x63: /* movsxd */
4047 if (ctxt->mode != X86EMUL_MODE_PROT64)
4048 goto cannot_emulate;
4049 ctxt->dst.val = (s32) ctxt->src.val;
4051 case 0x6c: /* insb */
4052 case 0x6d: /* insw/insd */
4053 ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
4054 goto do_io_in;
4055 case 0x6e: /* outsb */
4056 case 0x6f: /* outsw/outsd */
4057 ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
4058 goto do_io_out;
4060 case 0x70 ... 0x7f: /* jcc (short) */
4061 if (test_cc(ctxt->b, ctxt->eflags))
4062 rc = jmp_rel(ctxt, ctxt->src.val);
4064 case 0x8d: /* lea r16/r32, m */
4065 ctxt->dst.val = ctxt->src.addr.mem.ea;
4067 case 0x8f: /* pop (sole member of Grp1a) */
4068 rc = em_grp1a(ctxt);
4070 case 0x90 ... 0x97: /* nop / xchg reg, rax */
4071 if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
4072 break;
4073 rc = em_xchg(ctxt);
4074 break;
4075 case 0x98: /* cbw/cwde/cdqe */
4076 switch (ctxt->op_bytes) {
4077 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4078 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4079 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
4080 }
4081 break;
4085 case 0xcc: /* int3 */
4086 rc = emulate_int(ctxt, 3);
4088 case 0xcd: /* int n */
4089 rc = emulate_int(ctxt, ctxt->src.val);
4091 case 0xce: /* into */
4092 if (ctxt->eflags & EFLG_OF)
4093 rc = emulate_int(ctxt, 4);
4095 case 0xd0 ... 0xd1: /* Grp2 */
4096 rc = em_grp2(ctxt);
4097 break;
4098 case 0xd2 ... 0xd3: /* Grp2 */
4099 ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
4100 rc = em_grp2(ctxt);
4101 break;
4102 case 0xe4: /* inb */
4103 case 0xe5: /* in */
4104 goto do_io_in;
4105 case 0xe6: /* outb */
4106 case 0xe7: /* out */
4107 goto do_io_out;
4108 case 0xe9: /* jmp rel */
4109 case 0xeb: /* jmp rel short */
4110 rc = jmp_rel(ctxt, ctxt->src.val);
4111 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4113 case 0xec: /* in al,dx */
4114 case 0xed: /* in (e/r)ax,dx */
4115 do_io_in:
4116 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
4117 &ctxt->dst.val))
4118 goto done; /* IO is needed */
4120 case 0xee: /* out dx,al */
4121 case 0xef: /* out dx,(e/r)ax */
4123 ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
4125 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4127 case 0xf4: /* hlt */
4128 ctxt->ops->halt(ctxt);
4130 case 0xf5: /* cmc */
4131 /* complement carry flag from eflags reg */
4132 ctxt->eflags ^= EFLG_CF;
4134 case 0xf8: /* clc */
4135 ctxt->eflags &= ~EFLG_CF;
4137 case 0xf9: /* stc */
4138 ctxt->eflags |= EFLG_CF;
4140 case 0xfc: /* cld */
4141 ctxt->eflags &= ~EFLG_DF;
4143 case 0xfd: /* std */
4144 ctxt->eflags |= EFLG_DF;
4146 case 0xfe: /* Grp4 */
4147 rc = em_grp45(ctxt);
4149 case 0xff: /* Grp5 */
4150 rc = em_grp45(ctxt);
4152 default:
4153 goto cannot_emulate;
4154 }
4156 if (rc != X86EMUL_CONTINUE)
4157 goto done;
4159 writeback:
4160 rc = writeback(ctxt);
4161 if (rc != X86EMUL_CONTINUE)
4162 goto done;
4165 * restore dst type in case the decoding will be reused
4166 * (happens for string instructions)
4167 */
4168 ctxt->dst.type = saved_dst_type;
4170 if ((ctxt->d & SrcMask) == SrcSI)
4171 string_addr_inc(ctxt, seg_override(ctxt),
4172 VCPU_REGS_RSI, &ctxt->src);
4174 if ((ctxt->d & DstMask) == DstDI)
4175 string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
4178 if (ctxt->rep_prefix && (ctxt->d & String)) {
4179 struct read_cache *r = &ctxt->io_read;
4180 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
4182 if (!string_insn_completed(ctxt)) {
4184 * Re-enter guest when pio read ahead buffer is empty
4185 * or, if it is not used, after every 1024 iterations.
4186 */
4187 if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
4188 (r->end == 0 || r->end != r->pos)) {
4190 * Reset the read cache. This usually happens before
4191 * decode, but since the instruction is restarted
4192 * we have to do it here.
4194 ctxt->mem_read.end = 0;
4195 return EMULATION_RESTART;
4196 }
4197 goto done; /* skip rip writeback */
4198 }
4199 }
4201 ctxt->eip = ctxt->_eip;
4203 done:
4204 if (rc == X86EMUL_PROPAGATE_FAULT)
4205 ctxt->have_exception = true;
4206 if (rc == X86EMUL_INTERCEPTED)
4207 return EMULATION_INTERCEPTED;
4209 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
4211 twobyte_insn:
4212 switch (ctxt->b) {
4213 case 0x09: /* wbinvd */
4214 (ctxt->ops->wbinvd)(ctxt);
4216 case 0x08: /* invd */
4217 case 0x0d: /* GrpP (prefetch) */
4218 case 0x18: /* Grp16 (prefetch/nop) */
4220 case 0x20: /* mov cr, reg */
4221 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
4223 case 0x21: /* mov from dr to reg */
4224 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
4226 case 0x22: /* mov reg, cr */
4227 if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
4228 emulate_gp(ctxt, 0);
4229 rc = X86EMUL_PROPAGATE_FAULT;
4230 goto done;
4231 }
4232 ctxt->dst.type = OP_NONE;
4234 case 0x23: /* mov from reg to dr */
4235 if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
4236 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
4237 ~0ULL : ~0U)) < 0) {
4238 /* #UD condition is already handled by the code above */
4239 emulate_gp(ctxt, 0);
4240 rc = X86EMUL_PROPAGATE_FAULT;
4241 goto done;
4242 }
4244 ctxt->dst.type = OP_NONE; /* no writeback */
4248 msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
4249 | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
4250 if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
4251 emulate_gp(ctxt, 0);
4252 rc = X86EMUL_PROPAGATE_FAULT;
4255 rc = X86EMUL_CONTINUE;
4259 if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
4260 emulate_gp(ctxt, 0);
4261 rc = X86EMUL_PROPAGATE_FAULT;
4264 ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
4265 ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
4267 rc = X86EMUL_CONTINUE;
4269 case 0x40 ... 0x4f: /* cmov */
4270 ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
4271 if (!test_cc(ctxt->b, ctxt->eflags))
4272 ctxt->dst.type = OP_NONE; /* no writeback */
4274 case 0x80 ... 0x8f: /* jnz rel, etc*/
4275 if (test_cc(ctxt->b, ctxt->eflags))
4276 rc = jmp_rel(ctxt, ctxt->src.val);
4278 case 0x90 ... 0x9f: /* setcc r/m8 */
4279 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
4283 ctxt->dst.type = OP_NONE;
4284 /* only subword offset */
4285 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
4286 emulate_2op_SrcV_nobyte(ctxt, "bt");
4288 case 0xa4: /* shld imm8, r, r/m */
4289 case 0xa5: /* shld cl, r, r/m */
4290 emulate_2op_cl(ctxt, "shld");
4294 emulate_2op_SrcV_nobyte(ctxt, "bts");
4296 case 0xac: /* shrd imm8, r, r/m */
4297 case 0xad: /* shrd cl, r, r/m */
4298 emulate_2op_cl(ctxt, "shrd");
4300 case 0xae: /* clflush */
4302 case 0xb0 ... 0xb1: /* cmpxchg */
4304 * Save real source value, then compare EAX against
4305 * destination.
4306 */
4307 ctxt->src.orig_val = ctxt->src.val;
4308 ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
4309 emulate_2op_SrcV(ctxt, "cmp");
4310 if (ctxt->eflags & EFLG_ZF) {
4311 /* Success: write back to memory. */
4312 ctxt->dst.val = ctxt->src.orig_val;
4314 /* Failure: write the value we saw to EAX. */
4315 ctxt->dst.type = OP_REG;
4316 ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
4321 emulate_2op_SrcV_nobyte(ctxt, "btr");
4323 case 0xb6 ... 0xb7: /* movzx */
4324 ctxt->dst.bytes = ctxt->op_bytes;
4325 ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
4326 : (u16) ctxt->src.val;
4328 case 0xba: /* Grp8 */
4329 switch (ctxt->modrm_reg & 3) {
4342 emulate_2op_SrcV_nobyte(ctxt, "btc");
4344 case 0xbc: { /* bsf */
4345 u8 zf;
4346 __asm__ ("bsf %2, %0; setz %1"
4347 : "=r"(ctxt->dst.val), "=q"(zf)
4348 : "r"(ctxt->src.val));
4349 ctxt->eflags &= ~X86_EFLAGS_ZF;
4350 if (zf) {
4351 ctxt->eflags |= X86_EFLAGS_ZF;
4352 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4353 }
4354 break;
4355 }
4356 case 0xbd: { /* bsr */
4357 u8 zf;
4358 __asm__ ("bsr %2, %0; setz %1"
4359 : "=r"(ctxt->dst.val), "=q"(zf)
4360 : "r"(ctxt->src.val));
4361 ctxt->eflags &= ~X86_EFLAGS_ZF;
4362 if (zf) {
4363 ctxt->eflags |= X86_EFLAGS_ZF;
4364 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4365 }
4366 break;
4367 }
4368 case 0xbe ... 0xbf: /* movsx */
4369 ctxt->dst.bytes = ctxt->op_bytes;
4370 ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
4371 (s16) ctxt->src.val;
4373 case 0xc0 ... 0xc1: /* xadd */
4374 emulate_2op_SrcV(ctxt, "add");
4375 /* Write back the register source. */
4376 ctxt->src.val = ctxt->dst.orig_val;
4377 write_register_operand(&ctxt->src);
4379 case 0xc3: /* movnti */
4380 ctxt->dst.bytes = ctxt->op_bytes;
4381 ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
4382 (u64) ctxt->src.val;
4384 case 0xc7: /* Grp9 (cmpxchg8b) */
4385 rc = em_grp9(ctxt);
4386 break;
4387 default:
4388 goto cannot_emulate;
4389 }
4391 if (rc != X86EMUL_CONTINUE)
4392 goto done;
4394 goto writeback;
4396 cannot_emulate:
4397 return EMULATION_FAILED;
4398 }