1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
35 #define OpImplicit 1ull /* No generic decode */
36 #define OpReg 2ull /* Register */
37 #define OpMem 3ull /* Memory */
38 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
39 #define OpDI 5ull /* ES:DI/EDI/RDI */
40 #define OpMem64 6ull /* Memory, 64-bit */
41 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
42 #define OpDX 8ull /* DX register */
43 #define OpCL 9ull /* CL register (for shifts) */
44 #define OpImmByte 10ull /* 8-bit sign extended immediate */
45 #define OpOne 11ull /* Implied 1 */
46 #define OpImm 12ull /* Sign extended immediate */
47 #define OpMem16 13ull /* Memory operand (16-bit). */
48 #define OpMem32 14ull /* Memory operand (32-bit). */
49 #define OpImmU 15ull /* Immediate operand, zero extended */
50 #define OpSI 16ull /* SI/ESI/RSI */
51 #define OpImmFAddr 17ull /* Immediate far address */
52 #define OpMemFAddr 18ull /* Far address in memory */
53 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
54 #define OpES 20ull /* ES */
55 #define OpCS 21ull /* CS */
56 #define OpSS 22ull /* SS */
57 #define OpDS 23ull /* DS */
58 #define OpFS 24ull /* FS */
59 #define OpGS 25ull /* GS */
61 #define OpBits 5 /* Width of operand field */
62 #define OpMask ((1ull << OpBits) - 1)
65 * Opcode effective-address decode tables.
66 * Note that we only emulate instructions that have at least one memory
67 * operand (excluding implicit stack references). We assume that stack
68 * references and instruction fetches will never occur in special memory
69 areas that require emulation. So, for example, 'mov <imm>,<reg>' need not be handled.
73 /* Operand sizes: 8-bit operands or specified/overridden size. */
74 #define ByteOp (1<<0) /* 8-bit operands. */
75 /* Destination operand type. */
77 #define ImplicitOps (OpImplicit << DstShift)
78 #define DstReg (OpReg << DstShift)
79 #define DstMem (OpMem << DstShift)
80 #define DstAcc (OpAcc << DstShift)
81 #define DstDI (OpDI << DstShift)
82 #define DstMem64 (OpMem64 << DstShift)
83 #define DstImmUByte (OpImmUByte << DstShift)
84 #define DstDX (OpDX << DstShift)
85 #define DstMask (OpMask << DstShift)
86 /* Source operand type. */
88 #define SrcNone (OpNone << SrcShift)
89 #define SrcReg (OpReg << SrcShift)
90 #define SrcMem (OpMem << SrcShift)
91 #define SrcMem16 (OpMem16 << SrcShift)
92 #define SrcMem32 (OpMem32 << SrcShift)
93 #define SrcImm (OpImm << SrcShift)
94 #define SrcImmByte (OpImmByte << SrcShift)
95 #define SrcOne (OpOne << SrcShift)
96 #define SrcImmUByte (OpImmUByte << SrcShift)
97 #define SrcImmU (OpImmU << SrcShift)
98 #define SrcSI (OpSI << SrcShift)
99 #define SrcImmFAddr (OpImmFAddr << SrcShift)
100 #define SrcMemFAddr (OpMemFAddr << SrcShift)
101 #define SrcAcc (OpAcc << SrcShift)
102 #define SrcImmU16 (OpImmU16 << SrcShift)
103 #define SrcDX (OpDX << SrcShift)
104 #define SrcMask (OpMask << SrcShift)
105 #define BitOp (1<<11)
106 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
107 #define String (1<<13) /* String instruction (rep capable) */
108 #define Stack (1<<14) /* Stack instruction (push/pop) */
109 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
110 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
111 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
112 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
113 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
114 #define Sse (1<<18) /* SSE Vector instruction */
115 /* Generic ModRM decode. */
116 #define ModRM (1<<19)
117 /* Destination is only written; never read. */
120 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
121 #define VendorSpecific (1<<22) /* Vendor specific instruction */
122 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
123 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
124 #define Undefined (1<<25) /* No Such Instruction */
125 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
126 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
128 /* Source 2 operand type */
129 #define Src2Shift (29)
130 #define Src2None (OpNone << Src2Shift)
131 #define Src2CL (OpCL << Src2Shift)
132 #define Src2ImmByte (OpImmByte << Src2Shift)
133 #define Src2One (OpOne << Src2Shift)
134 #define Src2Imm (OpImm << Src2Shift)
135 #define Src2ES (OpES << Src2Shift)
136 #define Src2CS (OpCS << Src2Shift)
137 #define Src2SS (OpSS << Src2Shift)
138 #define Src2DS (OpDS << Src2Shift)
139 #define Src2FS (OpFS << Src2Shift)
140 #define Src2GS (OpGS << Src2Shift)
141 #define Src2Mask (OpMask << Src2Shift)
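/*
 * Example of how the operand fields combine (illustrative only): an opcode
 * described as
 *
 *	DstReg | SrcMem | ModRM
 *
 * packs OpReg into the destination field and OpMem into the source field.
 * The decoder later recovers each operand type with a shift-and-mask
 * roughly of the form (ctxt->d >> DstShift) & OpMask; the exact decode
 * helpers live further down in this file.
 */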
143 #define X2(x...) x, x
144 #define X3(x...) X2(x), x
145 #define X4(x...) X2(x), X2(x)
146 #define X5(x...) X4(x), x
147 #define X6(x...) X4(x), X2(x)
148 #define X7(x...) X4(x), X3(x)
149 #define X8(x...) X4(x), X4(x)
150 #define X16(x...) X8(x), X8(x)
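/*
 * For illustration, X4(foo) expands to "foo, foo, foo, foo", so a run of
 * identical opcode-table entries (for example the eight push-reg encodings
 * 0x50..0x57) can be written as a single X8(...) entry instead of eight
 * identical initializers.
 */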
156 int (*execute)(struct x86_emulate_ctxt *ctxt);
157 struct opcode *group;
158 struct group_dual *gdual;
159 struct gprefix *gprefix;
161 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
165 struct opcode mod012[8];
166 struct opcode mod3[8];
170 struct opcode pfx_no;
171 struct opcode pfx_66;
172 struct opcode pfx_f2;
173 struct opcode pfx_f3;
176 /* EFLAGS bit definitions. */
177 #define EFLG_ID (1<<21)
178 #define EFLG_VIP (1<<20)
179 #define EFLG_VIF (1<<19)
180 #define EFLG_AC (1<<18)
181 #define EFLG_VM (1<<17)
182 #define EFLG_RF (1<<16)
183 #define EFLG_IOPL (3<<12)
184 #define EFLG_NT (1<<14)
185 #define EFLG_OF (1<<11)
186 #define EFLG_DF (1<<10)
187 #define EFLG_IF (1<<9)
188 #define EFLG_TF (1<<8)
189 #define EFLG_SF (1<<7)
190 #define EFLG_ZF (1<<6)
191 #define EFLG_AF (1<<4)
192 #define EFLG_PF (1<<2)
193 #define EFLG_CF (1<<0)
195 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
196 #define EFLG_RESERVED_ONE_MASK 2
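/*
 * A sketch of how the reserved-bit masks are applied when a guest-visible
 * EFLAGS value is rebuilt (the iret emulation below does exactly this):
 *
 *	eflags &= ~EFLG_RESERVED_ZEROS_MASK;	(clear must-be-zero bits)
 *	eflags |= EFLG_RESERVED_ONE_MASK;	(bit 1 always reads as 1)
 */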
199 * Instruction emulation:
200 * Most instructions are emulated directly via a fragment of inline assembly
201 * code. This allows us to save/restore EFLAGS and thus very easily pick up
202 * any modified flags.
205 #if defined(CONFIG_X86_64)
206 #define _LO32 "k" /* force 32-bit operand */
207 #define _STK "%%rsp" /* stack pointer */
208 #elif defined(__i386__)
209 #define _LO32 "" /* force 32-bit operand */
210 #define _STK "%%esp" /* stack pointer */
214 * These EFLAGS bits are restored from the saved value during emulation, and
215 * any changes are written back to the saved value after emulation.
217 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
219 /* Before executing instruction: restore necessary bits in EFLAGS. */
220 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
221 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
222 "movl %"_sav",%"_LO32 _tmp"; " \
225 "movl %"_msk",%"_LO32 _tmp"; " \
226 "andl %"_LO32 _tmp",("_STK"); " \
228 "notl %"_LO32 _tmp"; " \
229 "andl %"_LO32 _tmp",("_STK"); " \
230 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
232 "orl %"_LO32 _tmp",("_STK"); " \
236 /* After executing instruction: write-back necessary bits in EFLAGS. */
237 #define _POST_EFLAGS(_sav, _msk, _tmp) \
238 /* _sav |= EFLAGS & _msk; */ \
241 "andl %"_msk",%"_LO32 _tmp"; " \
242 "orl %"_LO32 _tmp",%"_sav"; "
250 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
252 __asm__ __volatile__ ( \
253 _PRE_EFLAGS("0", "4", "2") \
254 _op _suffix " %"_x"3,%1; " \
255 _POST_EFLAGS("0", "4", "2") \
256 : "=m" ((ctxt)->eflags), \
257 "+q" (*(_dsttype*)&(ctxt)->dst.val), \
259 : _y ((ctxt)->src.val), "i" (EFLAGS_MASK)); \
263 /* Raw emulation: instruction has two explicit operands. */
264 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
266 unsigned long _tmp; \
268 switch ((ctxt)->dst.bytes) { \
270 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
273 ____emulate_2op(ctxt,_op,_lx,_ly,"l",u32); \
276 ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
281 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
283 unsigned long _tmp; \
284 switch ((ctxt)->dst.bytes) { \
286 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
289 __emulate_2op_nobyte(ctxt, _op, \
290 _wx, _wy, _lx, _ly, _qx, _qy); \
295 /* Source operand is byte-sized and may be restricted to just %cl. */
296 #define emulate_2op_SrcB(ctxt, _op) \
297 __emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")
299 /* Source operand is byte, word, long or quad sized. */
300 #define emulate_2op_SrcV(ctxt, _op) \
301 __emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")
303 /* Source operand is word, long or quad sized. */
304 #define emulate_2op_SrcV_nobyte(ctxt, _op) \
305 __emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
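/*
 * A minimal sketch (not an existing handler in this file) of how the
 * two-operand helpers are used: a handler only names the mnemonic and the
 * macro selects the operand width from ctxt->dst.bytes.
 */
static int __maybe_unused em_add_sketch(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "add");
	return X86EMUL_CONTINUE;
}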
307 /* Instruction has three operands; one operand is stored in the ECX register */
308 #define __emulate_2op_cl(ctxt, _op, _suffix, _type) \
310 unsigned long _tmp; \
311 _type _clv = (ctxt)->src2.val; \
312 _type _srcv = (ctxt)->src.val; \
313 _type _dstv = (ctxt)->dst.val; \
315 __asm__ __volatile__ ( \
316 _PRE_EFLAGS("0", "5", "2") \
317 _op _suffix " %4,%1 \n" \
318 _POST_EFLAGS("0", "5", "2") \
319 : "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
320 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
323 (ctxt)->src2.val = (unsigned long) _clv; \
324 (ctxt)->src2.val = (unsigned long) _srcv; \
325 (ctxt)->dst.val = (unsigned long) _dstv; \
328 #define emulate_2op_cl(ctxt, _op) \
330 switch ((ctxt)->dst.bytes) { \
332 __emulate_2op_cl(ctxt, _op, "w", u16); \
335 __emulate_2op_cl(ctxt, _op, "l", u32); \
338 ON64(__emulate_2op_cl(ctxt, _op, "q", ulong)); \
343 #define __emulate_1op(ctxt, _op, _suffix) \
345 unsigned long _tmp; \
347 __asm__ __volatile__ ( \
348 _PRE_EFLAGS("0", "3", "2") \
349 _op _suffix " %1; " \
350 _POST_EFLAGS("0", "3", "2") \
351 : "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
353 : "i" (EFLAGS_MASK)); \
356 /* Instruction has only one explicit operand (no source operand). */
357 #define emulate_1op(ctxt, _op) \
359 switch ((ctxt)->dst.bytes) { \
360 case 1: __emulate_1op(ctxt, _op, "b"); break; \
361 case 2: __emulate_1op(ctxt, _op, "w"); break; \
362 case 4: __emulate_1op(ctxt, _op, "l"); break; \
363 case 8: ON64(__emulate_1op(ctxt, _op, "q")); break; \
367 #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
369 unsigned long _tmp; \
370 ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX]; \
371 ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX]; \
373 __asm__ __volatile__ ( \
374 _PRE_EFLAGS("0", "5", "1") \
376 _op _suffix " %6; " \
378 _POST_EFLAGS("0", "5", "1") \
379 ".pushsection .fixup,\"ax\" \n\t" \
380 "3: movb $1, %4 \n\t" \
383 _ASM_EXTABLE(1b, 3b) \
384 : "=m" ((ctxt)->eflags), "=&r" (_tmp), \
385 "+a" (*rax), "+d" (*rdx), "+qm"(_ex) \
386 : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val), \
387 "a" (*rax), "d" (*rdx)); \
390 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
391 #define emulate_1op_rax_rdx(ctxt, _op, _ex) \
393 switch((ctxt)->src.bytes) { \
395 __emulate_1op_rax_rdx(ctxt, _op, "b", _ex); \
398 __emulate_1op_rax_rdx(ctxt, _op, "w", _ex); \
401 __emulate_1op_rax_rdx(ctxt, _op, "l", _ex); \
404 __emulate_1op_rax_rdx(ctxt, _op, "q", _ex)); \
409 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
410 enum x86_intercept intercept,
411 enum x86_intercept_stage stage)
413 struct x86_instruction_info info = {
414 .intercept = intercept,
415 .rep_prefix = ctxt->rep_prefix,
416 .modrm_mod = ctxt->modrm_mod,
417 .modrm_reg = ctxt->modrm_reg,
418 .modrm_rm = ctxt->modrm_rm,
419 .src_val = ctxt->src.val64,
420 .src_bytes = ctxt->src.bytes,
421 .dst_bytes = ctxt->dst.bytes,
422 .ad_bytes = ctxt->ad_bytes,
423 .next_rip = ctxt->eip,
426 return ctxt->ops->intercept(ctxt, &info, stage);
429 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
431 return (1UL << (ctxt->ad_bytes << 3)) - 1;
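/*
 * Worked example: with 16-bit addressing (ad_bytes == 2) the mask is
 * (1UL << 16) - 1 == 0xffff; with 32-bit addressing it is 0xffffffff.
 * The ad_bytes == sizeof(unsigned long) case is short-circuited by the
 * callers below, so the shift never reaches the word width.
 */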
434 /* Access/update address held in a register, based on addressing mode. */
435 static inline unsigned long
436 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
438 if (ctxt->ad_bytes == sizeof(unsigned long))
441 return reg & ad_mask(ctxt);
444 static inline unsigned long
445 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
447 return address_mask(ctxt, reg);
451 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
453 if (ctxt->ad_bytes == sizeof(unsigned long))
456 *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
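/*
 * Example: with ad_bytes == 2, incrementing *reg == 0x1ffff by 1 yields
 * 0x10000; only the low 16 bits wrap, while the untouched upper bits of
 * the register are preserved, matching real 16-bit address arithmetic.
 */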
459 static u32 desc_limit_scaled(struct desc_struct *desc)
461 u32 limit = get_desc_limit(desc);
463 return desc->g ? (limit << 12) | 0xfff : limit;
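/*
 * Example: a descriptor with limit 0xfffff and G=1 scales to
 * (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a 4GB flat segment; with
 * G=0 the 20-bit limit is used directly as a byte count.
 */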
466 static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
468 ctxt->has_seg_override = true;
469 ctxt->seg_override = seg;
472 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
474 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
477 return ctxt->ops->get_cached_segment_base(ctxt, seg);
480 static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
482 if (!ctxt->has_seg_override)
485 return ctxt->seg_override;
488 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
489 u32 error, bool valid)
491 ctxt->exception.vector = vec;
492 ctxt->exception.error_code = error;
493 ctxt->exception.error_code_valid = valid;
494 return X86EMUL_PROPAGATE_FAULT;
497 static int emulate_db(struct x86_emulate_ctxt *ctxt)
499 return emulate_exception(ctxt, DB_VECTOR, 0, false);
502 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
504 return emulate_exception(ctxt, GP_VECTOR, err, true);
507 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
509 return emulate_exception(ctxt, SS_VECTOR, err, true);
512 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
514 return emulate_exception(ctxt, UD_VECTOR, 0, false);
517 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
519 return emulate_exception(ctxt, TS_VECTOR, err, true);
522 static int emulate_de(struct x86_emulate_ctxt *ctxt)
524 return emulate_exception(ctxt, DE_VECTOR, 0, false);
527 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
529 return emulate_exception(ctxt, NM_VECTOR, 0, false);
532 static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
535 switch (ctxt->op_bytes) {
537 ctxt->_eip = (u16)dst;
540 ctxt->_eip = (u32)dst;
544 if ((cs_l && is_noncanonical_address(dst)) ||
545 (!cs_l && (dst >> 32) != 0))
546 return emulate_gp(ctxt, 0);
551 WARN(1, "unsupported eip assignment size\n");
553 return X86EMUL_CONTINUE;
556 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
558 return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
561 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
563 return assign_eip_near(ctxt, ctxt->_eip + rel);
566 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
569 struct desc_struct desc;
571 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
575 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
580 struct desc_struct desc;
582 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
583 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
586 static int __linearize(struct x86_emulate_ctxt *ctxt,
587 struct segmented_address addr,
588 unsigned size, bool write, bool fetch,
591 struct desc_struct desc;
598 la = seg_base(ctxt, addr.seg) + addr.ea;
599 switch (ctxt->mode) {
600 case X86EMUL_MODE_REAL:
602 case X86EMUL_MODE_PROT64:
603 if (((signed long)la << 16) >> 16 != la)
604 return emulate_gp(ctxt, 0);
607 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
611 /* code segment or read-only data segment */
612 if (((desc.type & 8) || !(desc.type & 2)) && write)
614 /* unreadable code segment */
615 if (!fetch && (desc.type & 8) && !(desc.type & 2))
617 lim = desc_limit_scaled(&desc);
618 if ((desc.type & 8) || !(desc.type & 4)) {
619 /* expand-up segment */
620 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
623 /* expand-down segment */
624 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
626 lim = desc.d ? 0xffffffff : 0xffff;
627 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
630 cpl = ctxt->ops->cpl(ctxt);
633 if (!(desc.type & 8)) {
637 } else if ((desc.type & 8) && !(desc.type & 4)) {
638 /* nonconforming code segment */
641 } else if ((desc.type & 8) && (desc.type & 4)) {
642 /* conforming code segment */
648 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
651 return X86EMUL_CONTINUE;
653 if (addr.seg == VCPU_SREG_SS)
654 return emulate_ss(ctxt, addr.seg);
656 return emulate_gp(ctxt, addr.seg);
659 static int linearize(struct x86_emulate_ctxt *ctxt,
660 struct segmented_address addr,
661 unsigned size, bool write,
664 return __linearize(ctxt, addr, size, write, false, linear);
668 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
669 struct segmented_address addr,
676 rc = linearize(ctxt, addr, size, false, &linear);
677 if (rc != X86EMUL_CONTINUE)
679 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
683 * Fetch the next byte of the instruction being emulated, which is pointed to
684 * by ctxt->_eip, then increment ctxt->_eip.
686 * Also prefetch the remaining bytes of the instruction without crossing a page
687 * boundary if they are not in the fetch_cache yet.
689 static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
691 struct fetch_cache *fc = &ctxt->fetch;
695 if (ctxt->_eip == fc->end) {
696 unsigned long linear;
697 struct segmented_address addr = { .seg = VCPU_SREG_CS,
699 cur_size = fc->end - fc->start;
700 size = min(15UL - cur_size,
701 PAGE_SIZE - offset_in_page(ctxt->_eip));
702 rc = __linearize(ctxt, addr, size, false, true, &linear);
703 if (unlikely(rc != X86EMUL_CONTINUE))
705 rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
706 size, &ctxt->exception);
707 if (unlikely(rc != X86EMUL_CONTINUE))
711 *dest = fc->data[ctxt->_eip - fc->start];
713 return X86EMUL_CONTINUE;
716 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
717 void *dest, unsigned size)
721 /* x86 instructions are limited to 15 bytes. */
722 if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
723 return X86EMUL_UNHANDLEABLE;
725 rc = do_insn_fetch_byte(ctxt, dest++);
726 if (rc != X86EMUL_CONTINUE)
729 return X86EMUL_CONTINUE;
732 /* Fetch next part of the instruction being emulated. */
733 #define insn_fetch(_type, _ctxt) \
734 ({ unsigned long _x; \
735 rc = do_insn_fetch(_ctxt, &_x, sizeof(_type)); \
736 if (rc != X86EMUL_CONTINUE) \
741 #define insn_fetch_arr(_arr, _size, _ctxt) \
742 ({ rc = do_insn_fetch(_ctxt, _arr, (_size)); \
743 if (rc != X86EMUL_CONTINUE) \
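/*
 * Both fetch macros are GNU statement expressions that report failure
 * through the caller's local "rc" and bail out early, so typical uses in
 * the decoder look like:
 *
 *	ctxt->modrm = insn_fetch(u8, ctxt);
 *	modrm_ea += insn_fetch(s32, ctxt);
 *
 * (both forms appear in decode_modrm() below).
 */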
748 * Given the 'reg' portion of a ModRM byte, and a register block, return a
749 * pointer into the block that addresses the relevant register.
750 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
752 static void *decode_register(u8 modrm_reg, unsigned long *regs,
757 p = &regs[modrm_reg];
758 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
759 p = (unsigned char *)&regs[modrm_reg & 3] + 1;
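/*
 * Worked example: with highbyte_regs set, modrm_reg == 5 selects CH,
 * i.e. byte 1 of regs[5 & 3] == regs[VCPU_REGS_RCX]; without it the same
 * encoding selects RBP/EBP/BP.
 */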
763 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
764 struct segmented_address addr,
765 u16 *size, unsigned long *address, int op_bytes)
772 rc = segmented_read_std(ctxt, addr, size, 2);
773 if (rc != X86EMUL_CONTINUE)
776 rc = segmented_read_std(ctxt, addr, address, op_bytes);
780 static int test_cc(unsigned int condition, unsigned int flags)
784 switch ((condition & 15) >> 1) {
786 rc |= (flags & EFLG_OF);
788 case 1: /* b/c/nae */
789 rc |= (flags & EFLG_CF);
792 rc |= (flags & EFLG_ZF);
795 rc |= (flags & (EFLG_CF|EFLG_ZF));
798 rc |= (flags & EFLG_SF);
801 rc |= (flags & EFLG_PF);
804 rc |= (flags & EFLG_ZF);
807 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
811 /* Odd condition identifiers (lsb == 1) have inverted sense. */
812 return (!!rc ^ (condition & 1));
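/*
 * Example: the Jcc/SETcc condition nibble 0x4 tests ZF ("e"/"z"), so
 * test_cc(0x4, flags) is non-zero when EFLG_ZF is set, while the odd
 * sibling 0x5 ("ne"/"nz") returns the inverted result.
 */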
815 static void fetch_register_operand(struct operand *op)
819 op->val = *(u8 *)op->addr.reg;
822 op->val = *(u16 *)op->addr.reg;
825 op->val = *(u32 *)op->addr.reg;
828 op->val = *(u64 *)op->addr.reg;
833 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
835 ctxt->ops->get_fpu(ctxt);
837 case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
838 case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
839 case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
840 case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
841 case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
842 case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
843 case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
844 case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
846 case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
847 case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
848 case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
849 case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
850 case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
851 case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
852 case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
853 case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
857 ctxt->ops->put_fpu(ctxt);
860 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
863 ctxt->ops->get_fpu(ctxt);
865 case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
866 case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
867 case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
868 case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
869 case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
870 case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
871 case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
872 case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
874 case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
875 case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
876 case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
877 case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
878 case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
879 case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
880 case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
881 case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
885 ctxt->ops->put_fpu(ctxt);
888 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
892 unsigned reg = ctxt->modrm_reg;
893 int highbyte_regs = ctxt->rex_prefix == 0;
895 if (!(ctxt->d & ModRM))
896 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
902 read_sse_reg(ctxt, &op->vec_val, reg);
907 if ((ctxt->d & ByteOp) && !inhibit_bytereg) {
908 op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
911 op->addr.reg = decode_register(reg, ctxt->regs, 0);
912 op->bytes = ctxt->op_bytes;
914 fetch_register_operand(op);
915 op->orig_val = op->val;
918 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
922 int index_reg = 0, base_reg = 0, scale;
923 int rc = X86EMUL_CONTINUE;
926 if (ctxt->rex_prefix) {
927 ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1; /* REX.R */
928 index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
929 ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
932 ctxt->modrm = insn_fetch(u8, ctxt);
933 ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
934 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
935 ctxt->modrm_rm |= (ctxt->modrm & 0x07);
936 ctxt->modrm_seg = VCPU_SREG_DS;
938 if (ctxt->modrm_mod == 3) {
940 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
941 op->addr.reg = decode_register(ctxt->modrm_rm,
942 ctxt->regs, ctxt->d & ByteOp);
946 op->addr.xmm = ctxt->modrm_rm;
947 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
950 fetch_register_operand(op);
956 if (ctxt->ad_bytes == 2) {
957 unsigned bx = ctxt->regs[VCPU_REGS_RBX];
958 unsigned bp = ctxt->regs[VCPU_REGS_RBP];
959 unsigned si = ctxt->regs[VCPU_REGS_RSI];
960 unsigned di = ctxt->regs[VCPU_REGS_RDI];
962 /* 16-bit ModR/M decode. */
963 switch (ctxt->modrm_mod) {
965 if (ctxt->modrm_rm == 6)
966 modrm_ea += insn_fetch(u16, ctxt);
969 modrm_ea += insn_fetch(s8, ctxt);
972 modrm_ea += insn_fetch(u16, ctxt);
975 switch (ctxt->modrm_rm) {
995 if (ctxt->modrm_mod != 0)
1002 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1003 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1004 ctxt->modrm_seg = VCPU_SREG_SS;
1005 modrm_ea = (u16)modrm_ea;
1007 /* 32/64-bit ModR/M decode. */
1008 if ((ctxt->modrm_rm & 7) == 4) {
1009 sib = insn_fetch(u8, ctxt);
1010 index_reg |= (sib >> 3) & 7;
1011 base_reg |= sib & 7;
1014 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1015 modrm_ea += insn_fetch(s32, ctxt);
1017 modrm_ea += ctxt->regs[base_reg];
1019 modrm_ea += ctxt->regs[index_reg] << scale;
1020 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1021 if (ctxt->mode == X86EMUL_MODE_PROT64)
1022 ctxt->rip_relative = 1;
1024 modrm_ea += ctxt->regs[ctxt->modrm_rm];
1025 switch (ctxt->modrm_mod) {
1027 if (ctxt->modrm_rm == 5)
1028 modrm_ea += insn_fetch(s32, ctxt);
1031 modrm_ea += insn_fetch(s8, ctxt);
1034 modrm_ea += insn_fetch(s32, ctxt);
1038 op->addr.mem.ea = modrm_ea;
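/*
 * Worked example (16-bit form): modrm == 0x42 has mod == 01, rm == 010,
 * which decodes to [bp + si + disp8]; because BP is involved the default
 * segment becomes SS, matching the modrm_seg fixup above.
 */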
1043 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1046 int rc = X86EMUL_CONTINUE;
1049 switch (ctxt->ad_bytes) {
1051 op->addr.mem.ea = insn_fetch(u16, ctxt);
1054 op->addr.mem.ea = insn_fetch(u32, ctxt);
1057 op->addr.mem.ea = insn_fetch(u64, ctxt);
1064 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1068 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1069 mask = ~(ctxt->dst.bytes * 8 - 1);
1071 if (ctxt->src.bytes == 2)
1072 sv = (s16)ctxt->src.val & (s16)mask;
1073 else if (ctxt->src.bytes == 4)
1074 sv = (s32)ctxt->src.val & (s32)mask;
1076 ctxt->dst.addr.mem.ea += (sv >> 3);
1079 /* only subword offset */
1080 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
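/*
 * Worked example: "bt word [mem], reg" with a bit offset of 100 and a
 * 16-bit destination: mask == ~15, sv == 96, so the effective address is
 * advanced by 96 / 8 == 12 bytes and the remaining in-word offset is
 * 100 & 15 == 4.
 */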
1083 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1084 unsigned long addr, void *dest, unsigned size)
1087 struct read_cache *mc = &ctxt->mem_read;
1090 int n = min(size, 8u);
1092 if (mc->pos < mc->end)
1095 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
1097 if (rc != X86EMUL_CONTINUE)
1102 memcpy(dest, mc->data + mc->pos, n);
1107 return X86EMUL_CONTINUE;
1110 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1111 struct segmented_address addr,
1118 rc = linearize(ctxt, addr, size, false, &linear);
1119 if (rc != X86EMUL_CONTINUE)
1121 return read_emulated(ctxt, linear, data, size);
1124 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1125 struct segmented_address addr,
1132 rc = linearize(ctxt, addr, size, true, &linear);
1133 if (rc != X86EMUL_CONTINUE)
1135 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1139 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1140 struct segmented_address addr,
1141 const void *orig_data, const void *data,
1147 rc = linearize(ctxt, addr, size, true, &linear);
1148 if (rc != X86EMUL_CONTINUE)
1150 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1151 size, &ctxt->exception);
1154 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1155 unsigned int size, unsigned short port,
1158 struct read_cache *rc = &ctxt->io_read;
1160 if (rc->pos == rc->end) { /* refill pio read ahead */
1161 unsigned int in_page, n;
1162 unsigned int count = ctxt->rep_prefix ?
1163 address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
1164 in_page = (ctxt->eflags & EFLG_DF) ?
1165 offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
1166 PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
1167 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1171 rc->pos = rc->end = 0;
1172 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1177 memcpy(dest, rc->data + rc->pos, size);
1182 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1183 u16 selector, struct desc_ptr *dt)
1185 struct x86_emulate_ops *ops = ctxt->ops;
1187 if (selector & 1 << 2) {
1188 struct desc_struct desc;
1191 memset (dt, 0, sizeof *dt);
1192 if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
1195 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1196 dt->address = get_desc_base(&desc);
1198 ops->get_gdt(ctxt, dt);
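/*
 * Example: selector 0x2b has index 5, TI == 1 and RPL == 3, so the
 * descriptor is looked up at offset 5 * 8 inside the LDT; with TI == 0
 * the GDT (fetched via get_gdt above) would be used instead.
 */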
1201 /* allowed only for 8-byte segment descriptors */
1202 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1203 u16 selector, struct desc_struct *desc)
1206 u16 index = selector >> 3;
1209 get_descriptor_table_ptr(ctxt, selector, &dt);
1211 if (dt.size < index * 8 + 7)
1212 return emulate_gp(ctxt, selector & 0xfffc);
1214 addr = dt.address + index * 8;
1215 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1219 /* allowed only for 8-byte segment descriptors */
1220 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1221 u16 selector, struct desc_struct *desc)
1224 u16 index = selector >> 3;
1227 get_descriptor_table_ptr(ctxt, selector, &dt);
1229 if (dt.size < index * 8 + 7)
1230 return emulate_gp(ctxt, selector & 0xfffc);
1232 addr = dt.address + index * 8;
1233 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1237 /* Does not support long mode */
1238 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1239 u16 selector, int seg, u8 cpl,
1240 struct desc_struct *desc)
1242 struct desc_struct seg_desc;
1244 unsigned err_vec = GP_VECTOR;
1246 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1249 memset(&seg_desc, 0, sizeof seg_desc);
1251 if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1252 || ctxt->mode == X86EMUL_MODE_REAL) {
1253 /* set real mode segment descriptor */
1254 set_desc_base(&seg_desc, selector << 4);
1255 set_desc_limit(&seg_desc, 0xffff);
1262 /* NULL selector is not valid for TR, CS and SS */
1263 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
1267 /* TR should be in GDT only */
1268 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1271 if (null_selector) /* for NULL selector skip all following checks */
1274 ret = read_segment_descriptor(ctxt, selector, &seg_desc);
1275 if (ret != X86EMUL_CONTINUE)
1278 err_code = selector & 0xfffc;
1279 err_vec = GP_VECTOR;
1281 /* can't load a system descriptor into a segment selector */
1282 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1286 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1296 * segment is not a writable data segment or segment
1297 * selector's RPL != CPL or segment's DPL != CPL
1299 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1303 if (!(seg_desc.type & 8))
1306 if (seg_desc.type & 4) {
1312 if (rpl > cpl || dpl != cpl)
1315 /* CS(RPL) <- CPL */
1316 selector = (selector & 0xfffc) | cpl;
1319 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1322 case VCPU_SREG_LDTR:
1323 if (seg_desc.s || seg_desc.type != 2)
1326 default: /* DS, ES, FS, or GS */
1328 * segment is not a data or readable code segment or
1329 * ((segment is a data or nonconforming code segment)
1330 * and (both RPL and CPL > DPL))
1332 if ((seg_desc.type & 0xa) == 0x8 ||
1333 (((seg_desc.type & 0xc) != 0xc) &&
1334 (rpl > dpl && cpl > dpl)))
1340 /* mark segment as accessed */
1342 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1343 if (ret != X86EMUL_CONTINUE)
1347 ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
1350 return X86EMUL_CONTINUE;
1352 emulate_exception(ctxt, err_vec, err_code, true);
1353 return X86EMUL_PROPAGATE_FAULT;
1356 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1357 u16 selector, int seg)
1359 u8 cpl = ctxt->ops->cpl(ctxt);
1360 return __load_segment_descriptor(ctxt, selector, seg, cpl, NULL);
1363 static void write_register_operand(struct operand *op)
1365 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1366 switch (op->bytes) {
1368 *(u8 *)op->addr.reg = (u8)op->val;
1371 *(u16 *)op->addr.reg = (u16)op->val;
1374 *op->addr.reg = (u32)op->val;
1375 break; /* 64b: zero-extend */
1377 *op->addr.reg = op->val;
1382 static int writeback(struct x86_emulate_ctxt *ctxt)
1386 switch (ctxt->dst.type) {
1388 write_register_operand(&ctxt->dst);
1391 if (ctxt->lock_prefix)
1392 rc = segmented_cmpxchg(ctxt,
1394 &ctxt->dst.orig_val,
1398 rc = segmented_write(ctxt,
1402 if (rc != X86EMUL_CONTINUE)
1406 write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
1414 return X86EMUL_CONTINUE;
1417 static int em_push(struct x86_emulate_ctxt *ctxt)
1419 struct segmented_address addr;
1421 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
1422 addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1423 addr.seg = VCPU_SREG_SS;
1425 /* Disable writeback. */
1426 ctxt->dst.type = OP_NONE;
1427 return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
1430 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1431 void *dest, int len)
1434 struct segmented_address addr;
1436 addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1437 addr.seg = VCPU_SREG_SS;
1438 rc = segmented_read(ctxt, addr, dest, len);
1439 if (rc != X86EMUL_CONTINUE)
1442 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
1446 static int em_pop(struct x86_emulate_ctxt *ctxt)
1448 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1451 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1452 void *dest, int len)
1455 unsigned long val, change_mask;
1456 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1457 int cpl = ctxt->ops->cpl(ctxt);
1459 rc = emulate_pop(ctxt, &val, len);
1460 if (rc != X86EMUL_CONTINUE)
1463 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1464 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1466 switch(ctxt->mode) {
1467 case X86EMUL_MODE_PROT64:
1468 case X86EMUL_MODE_PROT32:
1469 case X86EMUL_MODE_PROT16:
1471 change_mask |= EFLG_IOPL;
1473 change_mask |= EFLG_IF;
1475 case X86EMUL_MODE_VM86:
1477 return emulate_gp(ctxt, 0);
1478 change_mask |= EFLG_IF;
1480 default: /* real mode */
1481 change_mask |= (EFLG_IOPL | EFLG_IF);
1485 *(unsigned long *)dest =
1486 (ctxt->eflags & ~change_mask) | (val & change_mask);
1491 static int em_popf(struct x86_emulate_ctxt *ctxt)
1493 ctxt->dst.type = OP_REG;
1494 ctxt->dst.addr.reg = &ctxt->eflags;
1495 ctxt->dst.bytes = ctxt->op_bytes;
1496 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1499 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1501 int seg = ctxt->src2.val;
1503 ctxt->src.val = get_segment_selector(ctxt, seg);
1505 return em_push(ctxt);
1508 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1510 int seg = ctxt->src2.val;
1511 unsigned long selector;
1514 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1515 if (rc != X86EMUL_CONTINUE)
1518 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1522 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1524 unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
1525 int rc = X86EMUL_CONTINUE;
1526 int reg = VCPU_REGS_RAX;
1528 while (reg <= VCPU_REGS_RDI) {
1529 (reg == VCPU_REGS_RSP) ?
1530 (ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);
1533 if (rc != X86EMUL_CONTINUE)
1542 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1544 ctxt->src.val = (unsigned long)ctxt->eflags;
1545 return em_push(ctxt);
1548 static int em_popa(struct x86_emulate_ctxt *ctxt)
1550 int rc = X86EMUL_CONTINUE;
1551 int reg = VCPU_REGS_RDI;
1553 while (reg >= VCPU_REGS_RAX) {
1554 if (reg == VCPU_REGS_RSP) {
1555 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
1560 rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
1561 if (rc != X86EMUL_CONTINUE)
1568 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1570 struct x86_emulate_ops *ops = ctxt->ops;
1577 /* TODO: Add limit checks */
1578 ctxt->src.val = ctxt->eflags;
1580 if (rc != X86EMUL_CONTINUE)
1583 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1585 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1587 if (rc != X86EMUL_CONTINUE)
1590 ctxt->src.val = ctxt->_eip;
1592 if (rc != X86EMUL_CONTINUE)
1595 ops->get_idt(ctxt, &dt);
1597 eip_addr = dt.address + (irq << 2);
1598 cs_addr = dt.address + (irq << 2) + 2;
1600 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1601 if (rc != X86EMUL_CONTINUE)
1604 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1605 if (rc != X86EMUL_CONTINUE)
1608 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1609 if (rc != X86EMUL_CONTINUE)
1617 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1619 switch(ctxt->mode) {
1620 case X86EMUL_MODE_REAL:
1621 return emulate_int_real(ctxt, irq);
1622 case X86EMUL_MODE_VM86:
1623 case X86EMUL_MODE_PROT16:
1624 case X86EMUL_MODE_PROT32:
1625 case X86EMUL_MODE_PROT64:
1627 /* Protected-mode interrupts are not implemented yet */
1628 return X86EMUL_UNHANDLEABLE;
1632 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1634 int rc = X86EMUL_CONTINUE;
1635 unsigned long temp_eip = 0;
1636 unsigned long temp_eflags = 0;
1637 unsigned long cs = 0;
1638 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1639 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1640 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1641 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1643 /* TODO: Add stack limit check */
1645 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1647 if (rc != X86EMUL_CONTINUE)
1650 if (temp_eip & ~0xffff)
1651 return emulate_gp(ctxt, 0);
1653 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1655 if (rc != X86EMUL_CONTINUE)
1658 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1660 if (rc != X86EMUL_CONTINUE)
1663 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1665 if (rc != X86EMUL_CONTINUE)
1668 ctxt->_eip = temp_eip;
1671 if (ctxt->op_bytes == 4)
1672 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1673 else if (ctxt->op_bytes == 2) {
1674 ctxt->eflags &= ~0xffff;
1675 ctxt->eflags |= temp_eflags;
1678 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1679 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1684 static int em_iret(struct x86_emulate_ctxt *ctxt)
1686 switch(ctxt->mode) {
1687 case X86EMUL_MODE_REAL:
1688 return emulate_iret_real(ctxt);
1689 case X86EMUL_MODE_VM86:
1690 case X86EMUL_MODE_PROT16:
1691 case X86EMUL_MODE_PROT32:
1692 case X86EMUL_MODE_PROT64:
1694 /* iret from protected mode is not implemented yet */
1695 return X86EMUL_UNHANDLEABLE;
1699 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1702 unsigned short sel, old_sel;
1703 struct desc_struct old_desc, new_desc;
1704 const struct x86_emulate_ops *ops = ctxt->ops;
1705 u8 cpl = ctxt->ops->cpl(ctxt);
1707 /* Assignment of RIP may only fail in 64-bit mode */
1708 if (ctxt->mode == X86EMUL_MODE_PROT64)
1709 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
1712 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1714 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
1716 if (rc != X86EMUL_CONTINUE)
1719 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
1720 if (rc != X86EMUL_CONTINUE) {
1721 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
1722 /* assigning eip failed; restore the old cs */
1723 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
1729 static int em_grp1a(struct x86_emulate_ctxt *ctxt)
1731 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
1734 static int em_grp2(struct x86_emulate_ctxt *ctxt)
1736 switch (ctxt->modrm_reg) {
1738 emulate_2op_SrcB(ctxt, "rol");
1741 emulate_2op_SrcB(ctxt, "ror");
1744 emulate_2op_SrcB(ctxt, "rcl");
1747 emulate_2op_SrcB(ctxt, "rcr");
1749 case 4: /* sal/shl */
1750 case 6: /* sal/shl */
1751 emulate_2op_SrcB(ctxt, "sal");
1754 emulate_2op_SrcB(ctxt, "shr");
1757 emulate_2op_SrcB(ctxt, "sar");
1760 return X86EMUL_CONTINUE;
1763 static int em_not(struct x86_emulate_ctxt *ctxt)
1765 ctxt->dst.val = ~ctxt->dst.val;
1766 return X86EMUL_CONTINUE;
1769 static int em_neg(struct x86_emulate_ctxt *ctxt)
1771 emulate_1op(ctxt, "neg");
1772 return X86EMUL_CONTINUE;
1775 static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
1779 emulate_1op_rax_rdx(ctxt, "mul", ex);
1780 return X86EMUL_CONTINUE;
1783 static int em_imul_ex(struct x86_emulate_ctxt *ctxt)
1787 emulate_1op_rax_rdx(ctxt, "imul", ex);
1788 return X86EMUL_CONTINUE;
1791 static int em_div_ex(struct x86_emulate_ctxt *ctxt)
1795 emulate_1op_rax_rdx(ctxt, "div", de);
1797 return emulate_de(ctxt);
1798 return X86EMUL_CONTINUE;
1801 static int em_idiv_ex(struct x86_emulate_ctxt *ctxt)
1805 emulate_1op_rax_rdx(ctxt, "idiv", de);
1807 return emulate_de(ctxt);
1808 return X86EMUL_CONTINUE;
1811 static int em_grp45(struct x86_emulate_ctxt *ctxt)
1813 int rc = X86EMUL_CONTINUE;
1815 switch (ctxt->modrm_reg) {
1817 emulate_1op(ctxt, "inc");
1820 emulate_1op(ctxt, "dec");
1822 case 2: /* call near abs */ {
1824 old_eip = ctxt->_eip;
1825 rc = assign_eip_near(ctxt, ctxt->src.val);
1826 if (rc != X86EMUL_CONTINUE)
1828 ctxt->src.val = old_eip;
1832 case 4: /* jmp abs */
1833 rc = assign_eip_near(ctxt, ctxt->src.val);
1835 case 5: /* jmp far */
1836 rc = em_jmp_far(ctxt);
1845 static int em_grp9(struct x86_emulate_ctxt *ctxt)
1847 u64 old = ctxt->dst.orig_val64;
1849 if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
1850 ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
1851 ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1852 ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1853 ctxt->eflags &= ~EFLG_ZF;
1855 ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
1856 (u32) ctxt->regs[VCPU_REGS_RBX];
1858 ctxt->eflags |= EFLG_ZF;
1860 return X86EMUL_CONTINUE;
1863 static int em_ret(struct x86_emulate_ctxt *ctxt)
1868 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
1869 if (rc != X86EMUL_CONTINUE)
1872 return assign_eip_near(ctxt, eip);
1875 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
1878 unsigned long eip, cs;
1880 int cpl = ctxt->ops->cpl(ctxt);
1881 struct desc_struct old_desc, new_desc;
1882 const struct x86_emulate_ops *ops = ctxt->ops;
1884 if (ctxt->mode == X86EMUL_MODE_PROT64)
1885 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
1888 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
1889 if (rc != X86EMUL_CONTINUE)
1891 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1892 if (rc != X86EMUL_CONTINUE)
1894 /* Outer-privilege level return is not implemented */
1895 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
1896 return X86EMUL_UNHANDLEABLE;
1897 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0,
1899 if (rc != X86EMUL_CONTINUE)
1901 rc = assign_eip_far(ctxt, eip, new_desc.l);
1902 if (rc != X86EMUL_CONTINUE) {
1903 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
1904 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
1909 static int em_lseg(struct x86_emulate_ctxt *ctxt)
1911 int seg = ctxt->src2.val;
1915 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1917 rc = load_segment_descriptor(ctxt, sel, seg);
1918 if (rc != X86EMUL_CONTINUE)
1921 ctxt->dst.val = ctxt->src.val;
1926 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1927 struct desc_struct *cs, struct desc_struct *ss)
1931 memset(cs, 0, sizeof(struct desc_struct));
1932 ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
1933 memset(ss, 0, sizeof(struct desc_struct));
1935 cs->l = 0; /* will be adjusted later */
1936 set_desc_base(cs, 0); /* flat segment */
1937 cs->g = 1; /* 4kb granularity */
1938 set_desc_limit(cs, 0xfffff); /* 4GB limit */
1939 cs->type = 0x0b; /* Read, Execute, Accessed */
1941 cs->dpl = 0; /* will be adjusted later */
1945 set_desc_base(ss, 0); /* flat segment */
1946 set_desc_limit(ss, 0xfffff); /* 4GB limit */
1947 ss->g = 1; /* 4kb granularity */
1949 ss->type = 0x03; /* Read/Write, Accessed */
1950 ss->d = 1; /* 32bit stack segment */
1955 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
1957 u32 eax, ebx, ecx, edx;
1960 return ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)
1961 && ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
1962 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
1963 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
1966 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
1968 struct x86_emulate_ops *ops = ctxt->ops;
1969 u32 eax, ebx, ecx, edx;
1972 * syscall should always be enabled in long mode, so the check only
1973 * becomes vendor specific (via cpuid) when other modes are active...
1975 if (ctxt->mode == X86EMUL_MODE_PROT64)
1980 if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
1982 * Intel ("GenuineIntel")
1983 * Remark: Intel CPUs only support "syscall" in 64-bit
1984 * long mode, so a 32-bit compat application running on a
1985 * 64-bit guest will #UD. While this behaviour could be
1986 * fixed by emulating the AMD response, AMD CPUs cannot
1987 * be made to behave like Intel ones.
1989 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
1990 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
1991 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
1994 /* AMD ("AuthenticAMD") */
1995 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
1996 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
1997 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2000 /* AMD ("AMDisbetter!") */
2001 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2002 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2003 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2007 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2011 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2013 struct x86_emulate_ops *ops = ctxt->ops;
2014 struct desc_struct cs, ss;
2019 /* syscall is not available in real mode */
2020 if (ctxt->mode == X86EMUL_MODE_REAL ||
2021 ctxt->mode == X86EMUL_MODE_VM86)
2022 return emulate_ud(ctxt);
2024 if (!(em_syscall_is_enabled(ctxt)))
2025 return emulate_ud(ctxt);
2027 ops->get_msr(ctxt, MSR_EFER, &efer);
2028 setup_syscalls_segments(ctxt, &cs, &ss);
2030 if (!(efer & EFER_SCE))
2031 return emulate_ud(ctxt);
2033 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2035 cs_sel = (u16)(msr_data & 0xfffc);
2036 ss_sel = (u16)(msr_data + 8);
2038 if (efer & EFER_LMA) {
2042 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2043 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2045 ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
2046 if (efer & EFER_LMA) {
2047 #ifdef CONFIG_X86_64
2048 ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
2051 ctxt->mode == X86EMUL_MODE_PROT64 ?
2052 MSR_LSTAR : MSR_CSTAR, &msr_data);
2053 ctxt->_eip = msr_data;
2055 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2056 ctxt->eflags &= ~(msr_data | EFLG_RF);
2060 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2061 ctxt->_eip = (u32)msr_data;
2063 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2066 return X86EMUL_CONTINUE;
2069 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2071 struct x86_emulate_ops *ops = ctxt->ops;
2072 struct desc_struct cs, ss;
2077 ops->get_msr(ctxt, MSR_EFER, &efer);
2078 /* inject #GP if in real mode */
2079 if (ctxt->mode == X86EMUL_MODE_REAL)
2080 return emulate_gp(ctxt, 0);
2083 * Not recognized on AMD in compat mode (but is recognized in legacy mode).
2086 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2087 && !vendor_intel(ctxt))
2088 return emulate_ud(ctxt);
2090 /* XXX sysenter/sysexit have not been tested in 64-bit mode.
2091 * Therefore, we inject an #UD.
2093 if (ctxt->mode == X86EMUL_MODE_PROT64)
2094 return emulate_ud(ctxt);
2096 setup_syscalls_segments(ctxt, &cs, &ss);
2098 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2099 switch (ctxt->mode) {
2100 case X86EMUL_MODE_PROT32:
2101 if ((msr_data & 0xfffc) == 0x0)
2102 return emulate_gp(ctxt, 0);
2104 case X86EMUL_MODE_PROT64:
2105 if (msr_data == 0x0)
2106 return emulate_gp(ctxt, 0);
2110 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2111 cs_sel = (u16)msr_data;
2112 cs_sel &= ~SELECTOR_RPL_MASK;
2113 ss_sel = cs_sel + 8;
2114 ss_sel &= ~SELECTOR_RPL_MASK;
2115 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2120 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2121 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2123 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2124 ctxt->_eip = msr_data;
2126 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2127 ctxt->regs[VCPU_REGS_RSP] = msr_data;
2129 return X86EMUL_CONTINUE;
2132 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2134 struct x86_emulate_ops *ops = ctxt->ops;
2135 struct desc_struct cs, ss;
2136 u64 msr_data, rcx, rdx;
2138 u16 cs_sel = 0, ss_sel = 0;
2140 /* inject #GP if in real mode or Virtual 8086 mode */
2141 if (ctxt->mode == X86EMUL_MODE_REAL ||
2142 ctxt->mode == X86EMUL_MODE_VM86)
2143 return emulate_gp(ctxt, 0);
2145 setup_syscalls_segments(ctxt, &cs, &ss);
2147 if ((ctxt->rex_prefix & 0x8) != 0x0)
2148 usermode = X86EMUL_MODE_PROT64;
2150 usermode = X86EMUL_MODE_PROT32;
2152 rcx = ctxt->regs[VCPU_REGS_RCX];
2153 rdx = ctxt->regs[VCPU_REGS_RDX];
2157 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2159 case X86EMUL_MODE_PROT32:
2160 cs_sel = (u16)(msr_data + 16);
2161 if ((msr_data & 0xfffc) == 0x0)
2162 return emulate_gp(ctxt, 0);
2163 ss_sel = (u16)(msr_data + 24);
2165 case X86EMUL_MODE_PROT64:
2166 cs_sel = (u16)(msr_data + 32);
2167 if (msr_data == 0x0)
2168 return emulate_gp(ctxt, 0);
2169 ss_sel = cs_sel + 8;
2172 if (is_noncanonical_address(rcx) ||
2173 is_noncanonical_address(rdx))
2174 return emulate_gp(ctxt, 0);
2177 cs_sel |= SELECTOR_RPL_MASK;
2178 ss_sel |= SELECTOR_RPL_MASK;
2180 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2181 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2184 ctxt->regs[VCPU_REGS_RSP] = rcx;
2186 return X86EMUL_CONTINUE;
2189 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2192 if (ctxt->mode == X86EMUL_MODE_REAL)
2194 if (ctxt->mode == X86EMUL_MODE_VM86)
2196 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2197 return ctxt->ops->cpl(ctxt) > iopl;
2200 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2203 struct x86_emulate_ops *ops = ctxt->ops;
2204 struct desc_struct tr_seg;
2207 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2208 unsigned mask = (1 << len) - 1;
2211 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2214 if (desc_limit_scaled(&tr_seg) < 103)
2216 base = get_desc_base(&tr_seg);
2217 #ifdef CONFIG_X86_64
2218 base |= ((u64)base3) << 32;
2220 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2221 if (r != X86EMUL_CONTINUE)
2223 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2225 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2226 if (r != X86EMUL_CONTINUE)
2228 if ((perm >> bit_idx) & mask)
2233 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2239 if (emulator_bad_iopl(ctxt))
2240 if (!emulator_io_port_access_allowed(ctxt, port, len))
2243 ctxt->perm_ok = true;
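/*
 * Worked example: "in al, 0x3f9" (len == 1) checks bit_idx == (0x3f9 & 7)
 * == 1 of the 16-bit word read at io_bitmap_ptr + 0x3f9 / 8 inside the
 * TSS; the access is denied if that bit (mask == 1) is set.
 */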
2248 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2249 struct tss_segment_16 *tss)
2251 tss->ip = ctxt->_eip;
2252 tss->flag = ctxt->eflags;
2253 tss->ax = ctxt->regs[VCPU_REGS_RAX];
2254 tss->cx = ctxt->regs[VCPU_REGS_RCX];
2255 tss->dx = ctxt->regs[VCPU_REGS_RDX];
2256 tss->bx = ctxt->regs[VCPU_REGS_RBX];
2257 tss->sp = ctxt->regs[VCPU_REGS_RSP];
2258 tss->bp = ctxt->regs[VCPU_REGS_RBP];
2259 tss->si = ctxt->regs[VCPU_REGS_RSI];
2260 tss->di = ctxt->regs[VCPU_REGS_RDI];
2262 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2263 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2264 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2265 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2266 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2269 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2270 struct tss_segment_16 *tss)
2275 ctxt->_eip = tss->ip;
2276 ctxt->eflags = tss->flag | 2;
2277 ctxt->regs[VCPU_REGS_RAX] = tss->ax;
2278 ctxt->regs[VCPU_REGS_RCX] = tss->cx;
2279 ctxt->regs[VCPU_REGS_RDX] = tss->dx;
2280 ctxt->regs[VCPU_REGS_RBX] = tss->bx;
2281 ctxt->regs[VCPU_REGS_RSP] = tss->sp;
2282 ctxt->regs[VCPU_REGS_RBP] = tss->bp;
2283 ctxt->regs[VCPU_REGS_RSI] = tss->si;
2284 ctxt->regs[VCPU_REGS_RDI] = tss->di;
2287 * SDM says that segment selectors are loaded before segment
2290 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2291 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2292 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2293 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2294 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2299 * Now load the segment descriptors. If a fault happens at this stage,
2300 * it is handled in the context of the new task
2302 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2304 if (ret != X86EMUL_CONTINUE)
2306 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2308 if (ret != X86EMUL_CONTINUE)
2310 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2312 if (ret != X86EMUL_CONTINUE)
2314 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2316 if (ret != X86EMUL_CONTINUE)
2318 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2320 if (ret != X86EMUL_CONTINUE)
2323 return X86EMUL_CONTINUE;
2326 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2327 u16 tss_selector, u16 old_tss_sel,
2328 ulong old_tss_base, struct desc_struct *new_desc)
2330 struct x86_emulate_ops *ops = ctxt->ops;
2331 struct tss_segment_16 tss_seg;
2333 u32 new_tss_base = get_desc_base(new_desc);
2335 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2337 if (ret != X86EMUL_CONTINUE)
2338 /* FIXME: need to provide precise fault address */
2341 save_state_to_tss16(ctxt, &tss_seg);
2343 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2345 if (ret != X86EMUL_CONTINUE)
2346 /* FIXME: need to provide precise fault address */
2349 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2351 if (ret != X86EMUL_CONTINUE)
2352 /* FIXME: need to provide precise fault address */
2355 if (old_tss_sel != 0xffff) {
2356 tss_seg.prev_task_link = old_tss_sel;
2358 ret = ops->write_std(ctxt, new_tss_base,
2359 &tss_seg.prev_task_link,
2360 sizeof tss_seg.prev_task_link,
2362 if (ret != X86EMUL_CONTINUE)
2363 /* FIXME: need to provide precise fault address */
2367 return load_state_from_tss16(ctxt, &tss_seg);
2370 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2371 struct tss_segment_32 *tss)
2373 tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
2374 tss->eip = ctxt->_eip;
2375 tss->eflags = ctxt->eflags;
2376 tss->eax = ctxt->regs[VCPU_REGS_RAX];
2377 tss->ecx = ctxt->regs[VCPU_REGS_RCX];
2378 tss->edx = ctxt->regs[VCPU_REGS_RDX];
2379 tss->ebx = ctxt->regs[VCPU_REGS_RBX];
2380 tss->esp = ctxt->regs[VCPU_REGS_RSP];
2381 tss->ebp = ctxt->regs[VCPU_REGS_RBP];
2382 tss->esi = ctxt->regs[VCPU_REGS_RSI];
2383 tss->edi = ctxt->regs[VCPU_REGS_RDI];
2385 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2386 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2387 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2388 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2389 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2390 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2391 tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2394 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2395 struct tss_segment_32 *tss)
2400 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2401 return emulate_gp(ctxt, 0);
2402 ctxt->_eip = tss->eip;
2403 ctxt->eflags = tss->eflags | 2;
2404 ctxt->regs[VCPU_REGS_RAX] = tss->eax;
2405 ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
2406 ctxt->regs[VCPU_REGS_RDX] = tss->edx;
2407 ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
2408 ctxt->regs[VCPU_REGS_RSP] = tss->esp;
2409 ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
2410 ctxt->regs[VCPU_REGS_RSI] = tss->esi;
2411 ctxt->regs[VCPU_REGS_RDI] = tss->edi;
2414 * SDM says that segment selectors are loaded before segment
2415 * descriptors. This is important because CPL checks will
2418 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2419 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2420 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2421 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2422 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2423 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2424 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2429 /* Now load the segment descriptors. If a fault happens at this stage,
2430  * it is handled in the context of the new task. */
2432 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2434 if (ret != X86EMUL_CONTINUE)
2436 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2438 if (ret != X86EMUL_CONTINUE)
2440 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2442 if (ret != X86EMUL_CONTINUE)
2444 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2446 if (ret != X86EMUL_CONTINUE)
2448 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2450 if (ret != X86EMUL_CONTINUE)
2452 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2454 if (ret != X86EMUL_CONTINUE)
2456 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2458 if (ret != X86EMUL_CONTINUE)
2461 return X86EMUL_CONTINUE;
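/*
 * task_switch_32() follows exactly the same read/save/write/back-link
 * sequence as task_switch_16(), just with the 32-bit TSS layout.
 */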
2464 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2465 u16 tss_selector, u16 old_tss_sel,
2466 ulong old_tss_base, struct desc_struct *new_desc)
2468 struct x86_emulate_ops *ops = ctxt->ops;
2469 struct tss_segment_32 tss_seg;
2471 u32 new_tss_base = get_desc_base(new_desc);
2473 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2475 if (ret != X86EMUL_CONTINUE)
2476 /* FIXME: need to provide precise fault address */
2479 save_state_to_tss32(ctxt, &tss_seg);
2481 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2483 if (ret != X86EMUL_CONTINUE)
2484 /* FIXME: need to provide precise fault address */
2487 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2489 if (ret != X86EMUL_CONTINUE)
2490 /* FIXME: need to provide precise fault address */
2493 if (old_tss_sel != 0xffff) {
2494 tss_seg.prev_task_link = old_tss_sel;
2496 ret = ops->write_std(ctxt, new_tss_base,
2497 &tss_seg.prev_task_link,
2498 sizeof tss_seg.prev_task_link,
2500 if (ret != X86EMUL_CONTINUE)
2501 /* FIXME: need to provide precise fault address */
2505 return load_state_from_tss32(ctxt, &tss_seg);
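/*
 * emulator_do_task_switch() is the common entry point for task switches
 * reflected to the emulator. It validates the new TSS descriptor
 * (DPL/CPL checks unless the switch comes from IRET, presence, and a
 * minimal limit of 0x67 for a 32-bit TSS or 0x2b for a 16-bit one),
 * manages the busy bit in the old/new descriptors, updates EFLAGS.NT,
 * sets CR0.TS, loads TR, and finally pushes the error code when the
 * switch was caused by a faulting exception.
 */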
2508 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2509 u16 tss_selector, int reason,
2510 bool has_error_code, u32 error_code)
2512 struct x86_emulate_ops *ops = ctxt->ops;
2513 struct desc_struct curr_tss_desc, next_tss_desc;
2515 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2516 ulong old_tss_base =
2517 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2520 /* FIXME: old_tss_base == ~0 ? */
2522 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2523 if (ret != X86EMUL_CONTINUE)
2525 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2526 if (ret != X86EMUL_CONTINUE)
2529 /* FIXME: check that next_tss_desc is tss */
2531 if (reason != TASK_SWITCH_IRET) {
2532 if ((tss_selector & 3) > next_tss_desc.dpl ||
2533 ops->cpl(ctxt) > next_tss_desc.dpl)
2534 return emulate_gp(ctxt, 0);
2537 desc_limit = desc_limit_scaled(&next_tss_desc);
2538 if (!next_tss_desc.p ||
2539 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2540 desc_limit < 0x2b)) {
2541 emulate_ts(ctxt, tss_selector & 0xfffc);
2542 return X86EMUL_PROPAGATE_FAULT;
2545 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2546 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2547 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2550 if (reason == TASK_SWITCH_IRET)
2551 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2553 /* Set the back link to the previous task only if the NT bit is set in
2554    eflags; note that old_tss_sel is not used after this point. */
2555 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2556 old_tss_sel = 0xffff;
2558 if (next_tss_desc.type & 8)
2559 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2560 old_tss_base, &next_tss_desc);
2562 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2563 old_tss_base, &next_tss_desc);
2564 if (ret != X86EMUL_CONTINUE)
2567 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2568 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2570 if (reason != TASK_SWITCH_IRET) {
2571 next_tss_desc.type |= (1 << 1); /* set busy flag */
2572 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2575 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2576 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2578 if (has_error_code) {
2579 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2580 ctxt->lock_prefix = 0;
2581 ctxt->src.val = (unsigned long) error_code;
2582 ret = em_push(ctxt);
2588 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2589 u16 tss_selector, int reason,
2590 bool has_error_code, u32 error_code)
2594 ctxt->_eip = ctxt->eip;
2595 ctxt->dst.type = OP_NONE;
2597 rc = emulator_do_task_switch(ctxt, tss_selector, reason,
2598 has_error_code, error_code);
2600 if (rc == X86EMUL_CONTINUE)
2601 ctxt->eip = ctxt->_eip;
2603 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
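/*
 * string_addr_inc() steps SI/DI for string instructions: the register is
 * advanced by the operand size, backwards when EFLAGS.DF is set, and the
 * operand's effective address is recomputed from the updated register.
 */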
2606 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2607 int reg, struct operand *op)
2609 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2611 register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
2612 op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
2613 op->addr.mem.seg = seg;
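/*
 * em_das() below implements DAS (decimal adjust AL after subtraction).
 * For example, AL = 0x2b with AF set becomes 0x25: the low nibble 0xb is
 * greater than 9, so 6 is subtracted and AF stays set. PF/ZF/SF are then
 * recomputed with a dummy OR on the result.
 */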
2616 static int em_das(struct x86_emulate_ctxt *ctxt)
2619 bool af, cf, old_cf;
2621 cf = ctxt->eflags & X86_EFLAGS_CF;
2627 af = ctxt->eflags & X86_EFLAGS_AF;
2628 if ((al & 0x0f) > 9 || af) {
2630 cf = old_cf | (al >= 250);
2635 if (old_al > 0x99 || old_cf) {
2641 /* Set PF, ZF, SF */
2642 ctxt->src.type = OP_IMM;
2644 ctxt->src.bytes = 1;
2645 emulate_2op_SrcV(ctxt, "or");
2646 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2648 ctxt->eflags |= X86_EFLAGS_CF;
2650 ctxt->eflags |= X86_EFLAGS_AF;
2651 return X86EMUL_CONTINUE;
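/*
 * em_call() performs a relative jump and pushes the return address.
 * em_call_far() below must load the new CS descriptor before pushing the
 * old CS/EIP; if a push fails after CS has already been switched, the old
 * CS is restored so the guest is not left in a half-updated state.
 */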
2654 static int em_call(struct x86_emulate_ctxt *ctxt)
2657 long rel = ctxt->src.val;
2659 ctxt->src.val = (unsigned long)ctxt->_eip;
2660 rc = jmp_rel(ctxt, rel);
2661 if (rc != X86EMUL_CONTINUE)
2663 return em_push(ctxt);
2666 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2671 struct desc_struct old_desc, new_desc;
2672 const struct x86_emulate_ops *ops = ctxt->ops;
2673 int cpl = ctxt->ops->cpl(ctxt);
2675 old_eip = ctxt->_eip;
2676 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
2678 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2679 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2681 if (rc != X86EMUL_CONTINUE)
2682 return X86EMUL_CONTINUE;
2684 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2685 if (rc != X86EMUL_CONTINUE)
2688 ctxt->src.val = old_cs;
2690 if (rc != X86EMUL_CONTINUE)
2693 ctxt->src.val = old_eip;
2695 /* If we failed, we tainted the memory; at the very least restore cs. */
2697 if (rc != X86EMUL_CONTINUE)
2701 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2706 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2711 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2712 if (rc != X86EMUL_CONTINUE)
2714 rc = assign_eip_near(ctxt, eip);
2715 if (rc != X86EMUL_CONTINUE)
2717 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
2718 return X86EMUL_CONTINUE;
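/*
 * The em_add()..em_test() helpers below are thin wrappers around the
 * emulate_2op_SrcV() inline-asm helper, which executes the real ALU
 * instruction so that the result and EFLAGS match hardware. em_cmp() and
 * em_test() only compute flags, so they suppress writeback.
 */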
2721 static int em_add(struct x86_emulate_ctxt *ctxt)
2723 emulate_2op_SrcV(ctxt, "add");
2724 return X86EMUL_CONTINUE;
2727 static int em_or(struct x86_emulate_ctxt *ctxt)
2729 emulate_2op_SrcV(ctxt, "or");
2730 return X86EMUL_CONTINUE;
2733 static int em_adc(struct x86_emulate_ctxt *ctxt)
2735 emulate_2op_SrcV(ctxt, "adc");
2736 return X86EMUL_CONTINUE;
2739 static int em_sbb(struct x86_emulate_ctxt *ctxt)
2741 emulate_2op_SrcV(ctxt, "sbb");
2742 return X86EMUL_CONTINUE;
2745 static int em_and(struct x86_emulate_ctxt *ctxt)
2747 emulate_2op_SrcV(ctxt, "and");
2748 return X86EMUL_CONTINUE;
2751 static int em_sub(struct x86_emulate_ctxt *ctxt)
2753 emulate_2op_SrcV(ctxt, "sub");
2754 return X86EMUL_CONTINUE;
2757 static int em_xor(struct x86_emulate_ctxt *ctxt)
2759 emulate_2op_SrcV(ctxt, "xor");
2760 return X86EMUL_CONTINUE;
2763 static int em_cmp(struct x86_emulate_ctxt *ctxt)
2765 emulate_2op_SrcV(ctxt, "cmp");
2766 /* Disable writeback. */
2767 ctxt->dst.type = OP_NONE;
2768 return X86EMUL_CONTINUE;
2771 static int em_test(struct x86_emulate_ctxt *ctxt)
2773 emulate_2op_SrcV(ctxt, "test");
2774 /* Disable writeback. */
2775 ctxt->dst.type = OP_NONE;
2776 return X86EMUL_CONTINUE;
2779 static int em_xchg(struct x86_emulate_ctxt *ctxt)
2781 /* Write back the register source. */
2782 ctxt->src.val = ctxt->dst.val;
2783 write_register_operand(&ctxt->src);
2785 /* Write back the memory destination with implicit LOCK prefix. */
2786 ctxt->dst.val = ctxt->src.orig_val;
2787 ctxt->lock_prefix = 1;
2788 return X86EMUL_CONTINUE;
2791 static int em_imul(struct x86_emulate_ctxt *ctxt)
2793 emulate_2op_SrcV_nobyte(ctxt, "imul");
2794 return X86EMUL_CONTINUE;
2797 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2799 ctxt->dst.val = ctxt->src2.val;
2800 return em_imul(ctxt);
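/*
 * em_cwd() below covers cwd/cdq/cqo: RDX is filled with the sign of the
 * accumulator. With a 16-bit operand and AX = 0x8000, the expression
 * ~((0x8000 >> 15) - 1) yields all ones, so DX becomes 0xffff; with the
 * sign bit clear it yields 0.
 */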
2803 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2805 ctxt->dst.type = OP_REG;
2806 ctxt->dst.bytes = ctxt->src.bytes;
2807 ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
2808 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2810 return X86EMUL_CONTINUE;
2813 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2817 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2818 ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
2819 ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
2820 return X86EMUL_CONTINUE;
2823 static int em_mov(struct x86_emulate_ctxt *ctxt)
2825 ctxt->dst.val = ctxt->src.val;
2826 return X86EMUL_CONTINUE;
2829 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
2831 if (ctxt->modrm_reg > VCPU_SREG_GS)
2832 return emulate_ud(ctxt);
2834 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
2835 return X86EMUL_CONTINUE;
2838 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
2840 u16 sel = ctxt->src.val;
2842 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
2843 return emulate_ud(ctxt);
2845 if (ctxt->modrm_reg == VCPU_SREG_SS)
2846 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2848 /* Disable writeback. */
2849 ctxt->dst.type = OP_NONE;
2850 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
2853 static int em_movdqu(struct x86_emulate_ctxt *ctxt)
2855 memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
2856 return X86EMUL_CONTINUE;
2859 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
2864 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
2865 if (rc == X86EMUL_CONTINUE)
2866 ctxt->ops->invlpg(ctxt, linear);
2867 /* Disable writeback. */
2868 ctxt->dst.type = OP_NONE;
2869 return X86EMUL_CONTINUE;
2872 static int em_clts(struct x86_emulate_ctxt *ctxt)
2876 cr0 = ctxt->ops->get_cr(ctxt, 0);
2878 ctxt->ops->set_cr(ctxt, 0, cr0);
2879 return X86EMUL_CONTINUE;
2882 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
2886 if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
2887 return X86EMUL_UNHANDLEABLE;
2889 rc = ctxt->ops->fix_hypercall(ctxt);
2890 if (rc != X86EMUL_CONTINUE)
2893 /* Let the processor re-execute the fixed hypercall */
2894 ctxt->_eip = ctxt->eip;
2895 /* Disable writeback. */
2896 ctxt->dst.type = OP_NONE;
2897 return X86EMUL_CONTINUE;
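/*
 * em_lgdt() below and em_lidt() further down read a pseudo-descriptor
 * (16-bit limit plus base address) from the memory operand via
 * read_descriptor() and hand it to the host through set_gdt()/set_idt().
 * There is no destination operand, hence writeback is disabled.
 */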
2900 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
2902 struct desc_ptr desc_ptr;
2905 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2906 &desc_ptr.size, &desc_ptr.address,
2908 if (rc != X86EMUL_CONTINUE)
2910 ctxt->ops->set_gdt(ctxt, &desc_ptr);
2911 /* Disable writeback. */
2912 ctxt->dst.type = OP_NONE;
2913 return X86EMUL_CONTINUE;
2916 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
2920 rc = ctxt->ops->fix_hypercall(ctxt);
2922 /* Disable writeback. */
2923 ctxt->dst.type = OP_NONE;
2927 static int em_lidt(struct x86_emulate_ctxt *ctxt)
2929 struct desc_ptr desc_ptr;
2932 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2933 &desc_ptr.size, &desc_ptr.address,
2935 if (rc != X86EMUL_CONTINUE)
2937 ctxt->ops->set_idt(ctxt, &desc_ptr);
2938 /* Disable writeback. */
2939 ctxt->dst.type = OP_NONE;
2940 return X86EMUL_CONTINUE;
2943 static int em_smsw(struct x86_emulate_ctxt *ctxt)
2945 ctxt->dst.bytes = 2;
2946 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
2947 return X86EMUL_CONTINUE;
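/*
 * Note that em_lmsw() below can set CR0.PE but never clear it: only MP,
 * EM and TS (bits 1-3) are replaced, while bit 0 is OR-ed into CR0.
 */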
2950 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
2952 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
2953 | (ctxt->src.val & 0x0f));
2954 ctxt->dst.type = OP_NONE;
2955 return X86EMUL_CONTINUE;
2958 static int em_loop(struct x86_emulate_ctxt *ctxt)
2960 int rc = X86EMUL_CONTINUE;
2962 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
2963 if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
2964 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
2965 rc = jmp_rel(ctxt, ctxt->src.val);
2970 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
2972 int rc = X86EMUL_CONTINUE;
2974 if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
2975 rc = jmp_rel(ctxt, ctxt->src.val);
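/*
 * Both cli and sti below require CPL <= IOPL. em_sti() additionally arms
 * the STI interrupt shadow so the host delays interrupt injection until
 * the following instruction has completed, matching hardware behaviour;
 * the same mechanism is used for mov ss in em_mov_sreg_rm() above.
 */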
2980 static int em_cli(struct x86_emulate_ctxt *ctxt)
2982 if (emulator_bad_iopl(ctxt))
2983 return emulate_gp(ctxt, 0);
2985 ctxt->eflags &= ~X86_EFLAGS_IF;
2986 return X86EMUL_CONTINUE;
2989 static int em_sti(struct x86_emulate_ctxt *ctxt)
2991 if (emulator_bad_iopl(ctxt))
2992 return emulate_gp(ctxt, 0);
2994 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
2995 ctxt->eflags |= X86_EFLAGS_IF;
2996 return X86EMUL_CONTINUE;
2999 static bool valid_cr(int nr)
3011 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3013 if (!valid_cr(ctxt->modrm_reg))
3014 return emulate_ud(ctxt);
3016 return X86EMUL_CONTINUE;
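/*
 * check_cr_write() below rejects architecturally invalid control register
 * loads before they reach the host: setting reserved bits, CR0.PG without
 * CR0.PE, CR0.NW without CR0.CD, enabling paging in long mode without
 * CR4.PAE, CR3 values with reserved bits for the current paging mode, and
 * clearing CR4.PAE while EFER.LMA is set all raise #GP(0).
 */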
3019 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3021 u64 new_val = ctxt->src.val64;
3022 int cr = ctxt->modrm_reg;
3025 static u64 cr_reserved_bits[] = {
3026 0xffffffff00000000ULL,
3027 0, 0, 0, /* CR3 checked later */
3034 return emulate_ud(ctxt);
3036 if (new_val & cr_reserved_bits[cr])
3037 return emulate_gp(ctxt, 0);
3042 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3043 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3044 return emulate_gp(ctxt, 0);
3046 cr4 = ctxt->ops->get_cr(ctxt, 4);
3047 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3049 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3050 !(cr4 & X86_CR4_PAE))
3051 return emulate_gp(ctxt, 0);
3058 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3059 if (efer & EFER_LMA)
3060 rsvd = CR3_L_MODE_RESERVED_BITS;
3061 else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
3062 rsvd = CR3_PAE_RESERVED_BITS;
3063 else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
3064 rsvd = CR3_NONPAE_RESERVED_BITS;
3067 return emulate_gp(ctxt, 0);
3074 cr4 = ctxt->ops->get_cr(ctxt, 4);
3075 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3077 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3078 return emulate_gp(ctxt, 0);
3084 return X86EMUL_CONTINUE;
3087 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3091 ctxt->ops->get_dr(ctxt, 7, &dr7);
3093 /* Check if DR7.GD (general detect, bit 13) is set */
3094 return dr7 & (1 << 13);
3097 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3099 int dr = ctxt->modrm_reg;
3103 return emulate_ud(ctxt);
3105 cr4 = ctxt->ops->get_cr(ctxt, 4);
3106 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3107 return emulate_ud(ctxt);
3109 if (check_dr7_gd(ctxt))
3110 return emulate_db(ctxt);
3112 return X86EMUL_CONTINUE;
3115 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3117 u64 new_val = ctxt->src.val64;
3118 int dr = ctxt->modrm_reg;
3120 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3121 return emulate_gp(ctxt, 0);
3123 return check_dr_read(ctxt);
3126 static int check_svme(struct x86_emulate_ctxt *ctxt)
3130 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3132 if (!(efer & EFER_SVME))
3133 return emulate_ud(ctxt);
3135 return X86EMUL_CONTINUE;
3138 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3140 u64 rax = ctxt->regs[VCPU_REGS_RAX];
3142 /* Valid physical address? */
3143 if (rax & 0xffff000000000000ULL)
3144 return emulate_gp(ctxt, 0);
3146 return check_svme(ctxt);
3149 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3151 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3153 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3154 return emulate_ud(ctxt);
3156 return X86EMUL_CONTINUE;
3159 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3161 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3162 u64 rcx = ctxt->regs[VCPU_REGS_RCX];
3164 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3166 return emulate_gp(ctxt, 0);
3168 return X86EMUL_CONTINUE;
3171 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3173 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3174 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3175 return emulate_gp(ctxt, 0);
3177 return X86EMUL_CONTINUE;
3180 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3182 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3183 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3184 return emulate_gp(ctxt, 0);
3186 return X86EMUL_CONTINUE;
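/*
 * The macros below build the opcode tables: D()/DI()/DIP() declare
 * decode-only entries, optionally tagged with an intercept reason and a
 * permission-check callback; I()/II()/IIP() additionally attach an
 * ->execute handler; G()/GD()/EXT()/GP() redirect decode through a group
 * table selected by the ModRM byte or a mandatory prefix.
 */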
3189 #define D(_y) { .flags = (_y) }
3190 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
3191 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
3192 .check_perm = (_p) }
3194 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3195 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
3196 #define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
3197 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3198 #define II(_f, _e, _i) \
3199 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
3200 #define IIP(_f, _e, _i, _p) \
3201 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
3202 .check_perm = (_p) }
3203 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3205 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3206 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3207 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3209 #define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3210 I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3211 I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
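/*
 * I6ALU() emits the six classic ALU encodings (r/m,r and r,r/m plus the
 * accumulator/immediate form, each in byte and operand-size flavours).
 * For instance, I6ALU(Lock, em_add) expands to:
 *
 *	I(Lock | DstMem | SrcReg | ModRM | ByteOp, em_add),
 *	I(Lock | DstMem | SrcReg | ModRM, em_add),
 *	I(DstReg | SrcMem | ModRM | ByteOp, em_add),
 *	I(DstReg | SrcMem | ModRM, em_add),
 *	I(DstAcc | SrcImm | ByteOp, em_add),
 *	I(DstAcc | SrcImm, em_add)
 *
 * i.e. opcodes 00/01/02/03/04/05 for add.
 */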
3213 static struct opcode group7_rm1[] = {
3214 DI(SrcNone | ModRM | Priv, monitor),
3215 DI(SrcNone | ModRM | Priv, mwait),
3219 static struct opcode group7_rm3[] = {
3220 DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
3221 II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
3222 DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
3223 DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
3224 DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
3225 DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
3226 DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
3227 DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
3230 static struct opcode group7_rm7[] = {
3232 DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
3236 static struct opcode group1[] = {
3247 static struct opcode group1A[] = {
3248 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
3251 static struct opcode group3[] = {
3252 I(DstMem | SrcImm | ModRM, em_test),
3253 I(DstMem | SrcImm | ModRM, em_test),
3254 I(DstMem | SrcNone | ModRM | Lock, em_not),
3255 I(DstMem | SrcNone | ModRM | Lock, em_neg),
3256 I(SrcMem | ModRM, em_mul_ex),
3257 I(SrcMem | ModRM, em_imul_ex),
3258 I(SrcMem | ModRM, em_div_ex),
3259 I(SrcMem | ModRM, em_idiv_ex),
3262 static struct opcode group4[] = {
3263 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
3267 static struct opcode group5[] = {
3268 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
3269 D(SrcMem | ModRM | Stack),
3270 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
3271 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
3272 D(SrcMem | ModRM | Stack), N,
3275 static struct opcode group6[] = {
3276 DI(ModRM | Prot, sldt),
3277 DI(ModRM | Prot, str),
3278 DI(ModRM | Prot | Priv, lldt),
3279 DI(ModRM | Prot | Priv, ltr),
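/*
 * group7 below is a group_dual: the first set of eight entries is used
 * when ModRM.mod selects a memory operand, the second when mod == 3
 * (register forms), each indexed by ModRM.reg. EXT() entries extend
 * decode one level further through ModRM.rm, which is how 0f 01 with
 * mod == 3 reaches the monitor/mwait and SVM groups defined above.
 */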
3283 static struct group_dual group7 = { {
3284 DI(ModRM | Mov | DstMem | Priv, sgdt),
3285 DI(ModRM | Mov | DstMem | Priv, sidt),
3286 II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
3287 II(ModRM | SrcMem | Priv, em_lidt, lidt),
3288 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3289 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
3290 II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3292 I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
3294 N, EXT(0, group7_rm3),
3295 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3296 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
3299 static struct opcode group8[] = {
3301 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
3302 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
3305 static struct group_dual group9 = { {
3306 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
3308 N, N, N, N, N, N, N, N,
3311 static struct opcode group11[] = {
3312 I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
3315 static struct gprefix pfx_0f_6f_0f_7f = {
3316 N, N, N, I(Sse, em_movdqu),
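/*
 * opcode_table below is indexed directly by the primary opcode byte; the
 * 0f escape switches decode to twobyte_table further down. N marks an
 * opcode the emulator does not handle, and the X2..X16 helpers simply
 * repeat an entry to fill consecutive slots (e.g. the push/pop and
 * mov-immediate register rows).
 */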
3319 static struct opcode opcode_table[256] = {
3321 I6ALU(Lock, em_add),
3322 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3323 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3326 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3329 I6ALU(Lock, em_adc),
3330 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3331 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3333 I6ALU(Lock, em_sbb),
3334 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3335 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3337 I6ALU(Lock, em_and), N, N,
3339 I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3341 I6ALU(Lock, em_xor), N, N,
3343 I6ALU(0, em_cmp), N, N,
3347 X8(I(SrcReg | Stack, em_push)),
3349 X8(I(DstReg | Stack, em_pop)),
3351 I(ImplicitOps | Stack | No64, em_pusha),
3352 I(ImplicitOps | Stack | No64, em_popa),
3353 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3356 I(SrcImm | Mov | Stack, em_push),
3357 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3358 I(SrcImmByte | Mov | Stack, em_push),
3359 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3360 D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
3361 D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
3365 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
3366 G(DstMem | SrcImm | ModRM | Group, group1),
3367 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
3368 G(DstMem | SrcImmByte | ModRM | Group, group1),
3369 I2bv(DstMem | SrcReg | ModRM, em_test),
3370 I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
3372 I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
3373 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3374 I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
3375 D(ModRM | SrcMem | NoAccess | DstReg),
3376 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3379 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3381 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3382 I(SrcImmFAddr | No64, em_call_far), N,
3383 II(ImplicitOps | Stack, em_pushf, pushf),
3384 II(ImplicitOps | Stack, em_popf, popf), N, N,
3386 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3387 I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
3388 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3389 I2bv(SrcSI | DstDI | String, em_cmp),
3391 I2bv(DstAcc | SrcImm, em_test),
3392 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3393 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3394 I2bv(SrcAcc | DstDI | String, em_cmp),
3396 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3398 X8(I(DstReg | SrcImm | Mov, em_mov)),
3400 D2bv(DstMem | SrcImmByte | ModRM),
3401 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3402 I(ImplicitOps | Stack, em_ret),
3403 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
3404 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
3405 G(ByteOp, group11), G(0, group11),
3407 N, N, N, I(ImplicitOps | Stack, em_ret_far),
3408 D(ImplicitOps), DI(SrcImmByte, intn),
3409 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3411 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
3414 N, N, N, N, N, N, N, N,
3416 X3(I(SrcImmByte, em_loop)),
3417 I(SrcImmByte, em_jcxz),
3418 D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in),
3419 D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
3421 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
3422 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
3423 D2bvIP(SrcDX | DstAcc, in, check_perm_in),
3424 D2bvIP(SrcAcc | DstDX, out, check_perm_out),
3426 N, DI(ImplicitOps, icebp), N, N,
3427 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3428 G(ByteOp, group3), G(0, group3),
3430 D(ImplicitOps), D(ImplicitOps),
3431 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
3432 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3435 static struct opcode twobyte_table[256] = {
3437 G(0, group6), GD(0, &group7), N, N,
3438 N, I(ImplicitOps | VendorSpecific, em_syscall),
3439 II(ImplicitOps | Priv, em_clts, clts), N,
3440 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3441 N, D(ImplicitOps | ModRM), N, N,
3443 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
3445 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3446 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3447 DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
3448 DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
3450 N, N, N, N, N, N, N, N,
3452 DI(ImplicitOps | Priv, wrmsr),
3453 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3454 DI(ImplicitOps | Priv, rdmsr),
3455 DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
3456 I(ImplicitOps | VendorSpecific, em_sysenter),
3457 I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
3459 N, N, N, N, N, N, N, N,
3461 X16(D(DstReg | SrcMem | ModRM | Mov)),
3463 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3468 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3473 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3477 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
3479 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
3480 DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
3481 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3482 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
3484 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
3485 DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3486 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3487 D(DstMem | SrcReg | Src2CL | ModRM),
3488 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
3490 D2bv(DstMem | SrcReg | ModRM | Lock),
3491 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
3492 D(DstMem | SrcReg | ModRM | BitOp | Lock),
3493 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
3494 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
3495 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3498 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3499 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
3500 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3502 D2bv(DstMem | SrcReg | ModRM | Lock),
3503 N, D(DstMem | SrcReg | ModRM | Mov),
3504 N, N, N, GD(0, &group9),
3505 N, N, N, N, N, N, N, N,
3507 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3509 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3511 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
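/*
 * imm_size() returns the size of an immediate operand: one byte for
 * ByteOp encodings, otherwise the effective operand size. Even with a
 * 64-bit operand size, almost all instructions encode at most a 4-byte,
 * sign-extended immediate. decode_imm() then fetches the immediate from
 * the instruction stream at ->_eip.
 */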
3527 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
3531 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3537 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3538 unsigned size, bool sign_extension)
3540 int rc = X86EMUL_CONTINUE;
3544 op->addr.mem.ea = ctxt->_eip;
3545 /* NB. Immediates are sign-extended as necessary. */
3546 switch (op->bytes) {
3548 op->val = insn_fetch(s8, ctxt);
3551 op->val = insn_fetch(s16, ctxt);
3554 op->val = insn_fetch(s32, ctxt);
3557 if (!sign_extension) {
3558 switch (op->bytes) {
3566 op->val &= 0xffffffff;
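/*
 * decode_operand() turns the OpXxx selector taken from the opcode tables
 * into a concrete struct operand: register operands go through
 * decode_register_operand(), memory operands reuse the ModRM/SIB address
 * already gathered in ctxt->memop, and the accumulator and string (SI/DI)
 * forms are synthesized from the GPR file.
 */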
3574 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
3577 int rc = X86EMUL_CONTINUE;
3581 decode_register_operand(ctxt, op,
3583 ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7));
3586 rc = decode_imm(ctxt, op, 1, false);
3589 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3593 if ((ctxt->d & BitOp) && op == &ctxt->dst)
3594 fetch_bit_operand(ctxt);
3595 op->orig_val = op->val;
3598 ctxt->memop.bytes = 8;
3602 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3603 op->addr.reg = &ctxt->regs[VCPU_REGS_RAX];
3604 fetch_register_operand(op);
3605 op->orig_val = op->val;
3609 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3611 register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
3612 op->addr.mem.seg = VCPU_SREG_ES;