/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#define OpNone      0ull
#define OpImplicit  1ull  /* No generic decode */
#define OpReg       2ull  /* Register */
#define OpMem       3ull  /* Memory */
#define OpAcc       4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI        5ull  /* ES:DI/EDI/RDI */
#define OpMem64     6ull  /* Memory, 64-bit */
#define OpImmUByte  7ull  /* Zero-extended 8-bit immediate */
#define OpDX        8ull  /* DX register */
#define OpCL        9ull  /* CL register (for shifts) */
#define OpImmByte  10ull  /* 8-bit sign extended immediate */
#define OpOne      11ull  /* Implied 1 */
#define OpImm      12ull  /* Sign extended immediate */
#define OpMem16    13ull  /* Memory operand (16-bit). */
#define OpMem32    14ull  /* Memory operand (32-bit). */
#define OpImmU     15ull  /* Immediate operand, zero extended */
#define OpSI       16ull  /* SI/ESI/RSI */
#define OpImmFAddr 17ull  /* Immediate far address */
#define OpMemFAddr 18ull  /* Far address in memory */
#define OpImmU16   19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES       20ull  /* ES */
#define OpCS       21ull  /* CS */
#define OpSS       22ull  /* SS */
#define OpDS       23ull  /* DS */
#define OpFS       24ull  /* FS */
#define OpGS       25ull  /* GS */

#define OpBits     5  /* Width of operand field */
#define OpMask     ((1ull << OpBits) - 1)
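
/*
 * For illustration: operand kinds are packed into the per-opcode flags
 * word in 5-bit fields.  With DstShift == 1 and SrcShift == 6 as defined
 * below, a hypothetical table entry such as (DstReg | SrcMem) decodes
 * back as:
 *
 *	dst = (flags >> DstShift) & OpMask;	/- == OpReg -/
 *	src = (flags >> SrcShift) & OpMask;	/- == OpMem -/
 *
 * (sketch only; the real decode happens in the operand-decoding helpers
 * further down in this file).
 */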
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * never be emulated.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22) /* Vendor specific instruction */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
/* Source 2 operand type */
#define Src2Shift   (29)
#define Src2None    (OpNone << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
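
/*
 * Example of the X-macros above: sixteen identical opcode-table entries
 * can be written as a single X16(...) expression, e.g. a hypothetical row
 * X16(D(SrcImmByte)) expands to sixteen D(SrcImmByte) entries, one per
 * opcode 0x70..0x7f (D() being the table-entry wrapper used by the opcode
 * tables later in this file; shown here only for illustration).
 */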
struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		struct opcode *group;
		struct group_dual *gdual;
		struct gprefix *gprefix;
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
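
/*
 * Example of how the reserved-bit masks are meant to be applied when a
 * guest-supplied EFLAGS image is loaded (a sketch, mirroring the popf/iret
 * handling later in this file):
 *
 *	eflags &= ~EFLG_RESERVED_ZEROS_MASK;	/- reserved bits read as 0 -/
 *	eflags |=  EFLG_RESERVED_ONE_MASK;	/- bit 1 always reads as 1 -/
 */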
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp) \
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; " \
	"push %"_tmp"; " \
	"push %"_tmp"; " \
	"movl %"_msk",%"_LO32 _tmp"; " \
	"andl %"_LO32 _tmp",("_STK"); " \
	"pushf; " \
	"notl %"_LO32 _tmp"; " \
	"andl %"_LO32 _tmp",("_STK"); " \
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
	"pop  %"_tmp"; " \
	"orl  %"_LO32 _tmp",("_STK"); " \
	"popf; " \
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */ \
	"pushf; " \
	"pop  %"_LO32 _tmp"; " \
	"andl %"_msk",%"_LO32 _tmp"; " \
	"orl  %"_LO32 _tmp",%"_sav"; "
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
	do { \
		__asm__ __volatile__ ( \
			_PRE_EFLAGS("0", "4", "2") \
			_op _suffix " %"_x"3,%1; " \
			_POST_EFLAGS("0", "4", "2") \
			: "=m" ((ctxt)->eflags), \
			  "+q" (*(_dsttype*)&(ctxt)->dst.val), \
			  "=&r" (_tmp) \
			: _y ((ctxt)->src.val), "i" (EFLAGS_MASK)); \
	} while (0)

/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
	do { \
		unsigned long _tmp; \
		\
		switch ((ctxt)->dst.bytes) { \
		case 2: \
			____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
			break; \
		case 4: \
			____emulate_2op(ctxt,_op,_lx,_ly,"l",u32); \
			break; \
		case 8: \
			ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
			break; \
		} \
	} while (0)

#define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do { \
		unsigned long _tmp; \
		switch ((ctxt)->dst.bytes) { \
		case 1: \
			____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
			break; \
		default: \
			__emulate_2op_nobyte(ctxt, _op, \
					     _wx, _wy, _lx, _ly, _qx, _qy); \
			break; \
		} \
	} while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(ctxt, _op) \
	__emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(ctxt, _op) \
	__emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(ctxt, _op) \
	__emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
/* Instruction has three operands and one operand is stored in the ECX register */
#define __emulate_2op_cl(ctxt, _op, _suffix, _type) \
	do { \
		unsigned long _tmp; \
		_type _clv = (ctxt)->src2.val; \
		_type _srcv = (ctxt)->src.val; \
		_type _dstv = (ctxt)->dst.val; \
		\
		__asm__ __volatile__ ( \
			_PRE_EFLAGS("0", "5", "2") \
			_op _suffix " %4,%1 \n" \
			_POST_EFLAGS("0", "5", "2") \
			: "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
			); \
		\
		(ctxt)->src2.val = (unsigned long) _clv; \
		(ctxt)->src.val = (unsigned long) _srcv; \
		(ctxt)->dst.val = (unsigned long) _dstv; \
	} while (0)

#define emulate_2op_cl(ctxt, _op) \
	do { \
		switch ((ctxt)->dst.bytes) { \
		case 2: \
			__emulate_2op_cl(ctxt, _op, "w", u16); \
			break; \
		case 4: \
			__emulate_2op_cl(ctxt, _op, "l", u32); \
			break; \
		case 8: \
			ON64(__emulate_2op_cl(ctxt, _op, "q", ulong)); \
			break; \
		} \
	} while (0)
#define __emulate_1op(ctxt, _op, _suffix) \
	do { \
		unsigned long _tmp; \
		\
		__asm__ __volatile__ ( \
			_PRE_EFLAGS("0", "3", "2") \
			_op _suffix " %1; " \
			_POST_EFLAGS("0", "3", "2") \
			: "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
			  "=&r" (_tmp) \
			: "i" (EFLAGS_MASK)); \
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(ctxt, _op) \
	do { \
		switch ((ctxt)->dst.bytes) { \
		case 1: __emulate_1op(ctxt, _op, "b"); break; \
		case 2: __emulate_1op(ctxt, _op, "w"); break; \
		case 4: __emulate_1op(ctxt, _op, "l"); break; \
		case 8: ON64(__emulate_1op(ctxt, _op, "q")); break; \
		} \
	} while (0)
#define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
	do { \
		unsigned long _tmp; \
		ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX]; \
		ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX]; \
		\
		__asm__ __volatile__ ( \
			_PRE_EFLAGS("0", "5", "1") \
			"1: \n\t" \
			_op _suffix " %6; " \
			"2: \n\t" \
			_POST_EFLAGS("0", "5", "1") \
			".pushsection .fixup,\"ax\" \n\t" \
			"3: movb $1, %4 \n\t" \
			"jmp 2b \n\t" \
			".popsection \n\t" \
			_ASM_EXTABLE(1b, 3b) \
			: "=m" ((ctxt)->eflags), "=&r" (_tmp), \
			  "+a" (*rax), "+d" (*rdx), "+qm"(_ex) \
			: "i" (EFLAGS_MASK), "m" ((ctxt)->src.val), \
			  "a" (*rax), "d" (*rdx)); \
	} while (0)

/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
#define emulate_1op_rax_rdx(ctxt, _op, _ex) \
	do { \
		switch((ctxt)->src.bytes) { \
		case 1: \
			__emulate_1op_rax_rdx(ctxt, _op, "b", _ex); \
			break; \
		case 2: \
			__emulate_1op_rax_rdx(ctxt, _op, "w", _ex); \
			break; \
		case 4: \
			__emulate_1op_rax_rdx(ctxt, _op, "l", _ex); \
			break; \
		case 8: ON64( \
			__emulate_1op_rax_rdx(ctxt, _op, "q", _ex)); \
			break; \
		} \
	} while (0)
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
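
/*
 * Example: with a 16-bit address size (ad_bytes == 2) this yields
 * (1UL << 16) - 1 == 0xffff, and with ad_bytes == 4 it yields 0xffffffff,
 * i.e. exactly the bits that participate in effective-address arithmetic.
 */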
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
}
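
/*
 * Example: a 16-bit "rep movsw" that increments SI past 0xffff must wrap
 * to 0x0000 while leaving the upper bits of RSI untouched; the masking
 * above computes e.g. (*reg & ~0xffff) | ((0xffff + 2) & 0xffff).
 */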
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
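
/*
 * Example: a descriptor with g == 1 and a raw limit of 0xfffff scales to
 * (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a 4GB flat segment.
 */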
static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
{
	ctxt->has_seg_override = true;
	ctxt->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
{
	if (!ctxt->has_seg_override)
		return 0;

	return ctxt->seg_override;
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
				 bool cs_l)
{
	switch (ctxt->op_bytes) {
	case 2:
		ctxt->_eip = (u16)dst;
		break;
	case 4:
		ctxt->_eip = (u32)dst;
		break;
#ifdef CONFIG_X86_64
	case 8:
		if ((cs_l && is_noncanonical_address(dst)) ||
		    (!cs_l && (dst >> 32) != 0))
			return emulate_gp(ctxt, 0);
		ctxt->_eip = dst;
		break;
#endif
	default:
		WARN(1, "unsupported eip assignment size\n");
	}
	return X86EMUL_CONTINUE;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
static int __linearize(struct x86_emulate_ctxt *ctxt,
		       struct segmented_address addr,
		       unsigned size, bool write, bool fetch,
		       ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl, rpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		break;
	case X86EMUL_MODE_PROT64:
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment or read-only data segment */
		if (((desc.type & 8) || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		}
		cpl = ctxt->ops->cpl(ctxt);
		rpl = sel & 3;
		cpl = max(cpl, rpl);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, addr.seg);
	else
		return emulate_gp(ctxt, addr.seg);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
 * Fetch the next byte of the instruction being emulated which is pointed to
 * by ctxt->_eip, then increment ctxt->_eip.
 *
 * Also prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->fetch;
	int rc;
	int size, cur_size;

	if (ctxt->_eip == fc->end) {
		unsigned long linear;
		struct segmented_address addr = { .seg = VCPU_SREG_CS,
						  .ea  = ctxt->_eip };
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size,
			   PAGE_SIZE - offset_in_page(ctxt->_eip));
		rc = __linearize(ctxt, addr, size, false, true, &linear);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
				      size, &ctxt->exception);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		fc->end += size;
	}
	*dest = fc->data[ctxt->_eip - fc->start];
	ctxt->_eip++;
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_insn_fetch_byte(ctxt, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	unsigned long _x; \
	rc = do_insn_fetch(_ctxt, &_x, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	(_type)_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({	rc = do_insn_fetch(_ctxt, _arr, (_size)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
})

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
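
/*
 * Example: without a REX prefix, modrm_reg == 4 with byte-sized operands
 * selects AH, i.e. the second byte of the RAX slot:
 * (unsigned char *)&regs[4 & 3] + 1 == (unsigned char *)&regs[VCPU_REGS_RAX] + 1.
 */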
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
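
/*
 * Example: JE has condition code 0x4, so test_cc(0x4, flags) reduces to
 * !!(flags & EFLG_ZF); JNE is its odd sibling 0x5, whose low bit inverts
 * the result per the comment above.
 */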
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op,
				    int inhibit_bytereg)
{
	unsigned reg = ctxt->modrm_reg;
	int highbyte_regs = ctxt->rex_prefix == 0;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}

	op->type = OP_REG;
	if ((ctxt->d & ByteOp) && !inhibit_bytereg) {
		op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
		op->bytes = 1;
	} else {
		op->addr.reg = decode_register(reg, ctxt->regs, 0);
		op->bytes = ctxt->op_bytes;
	}
	fetch_register_operand(op);
	op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (ctxt->rex_prefix) {
		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (ctxt->rex_prefix & 2) << 2;	/* REX.X */
		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
	}

	ctxt->modrm = insn_fetch(u8, ctxt);
	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt->modrm_rm,
					       ctxt->regs, ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = ctxt->regs[VCPU_REGS_RBX];
		unsigned bp = ctxt->regs[VCPU_REGS_RBP];
		unsigned si = ctxt->regs[VCPU_REGS_RSI];
		unsigned di = ctxt->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else
				modrm_ea += ctxt->regs[base_reg];
			if (index_reg != 4)
				modrm_ea += ctxt->regs[index_reg] << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else
			modrm_ea += ctxt->regs[ctxt->modrm_rm];
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~(ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
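
/*
 * Example: "bt [mem], reg" with a 16-bit operand and a source value of 35
 * has mask == ~15, so sv == 32 and the effective address is advanced by
 * sv >> 3 == 4 bytes; the remaining in-word offset, 35 & 15 == 3, is what
 * the masking of ctxt->src.val above leaves behind.
 */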
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	while (size) {
		int n = min(size, 8u);

		if (mc->pos < mc->end)
			goto read_cached;

		rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
					      &ctxt->exception);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
		size -= n;
	}
	return X86EMUL_CONTINUE;
}
static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	struct x86_emulate_ops *ops = ctxt->ops;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(ctxt, dt);
}
/* allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}
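
/*
 * Example of the selector arithmetic used above: selector 0x002b has
 * index == 0x2b >> 3 == 5 and TI (bit 2) set, so it names the sixth
 * 8-byte entry of the LDT, with RPL == 0x2b & 3 == 3.
 */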
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, NULL);
}
static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	switch (ctxt->dst.type) {
	case OP_REG:
		write_register_operand(&ctxt->dst);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			rc = segmented_cmpxchg(ctxt,
					       ctxt->dst.addr.mem,
					       &ctxt->dst.orig_val,
					       &ctxt->dst.val,
					       ctxt->dst.bytes);
		else
			rc = segmented_write(ctxt,
					     ctxt->dst.addr.mem,
					     &ctxt->dst.val,
					     ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static int em_push(struct x86_emulate_ctxt *ctxt)
{
	struct segmented_address addr;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
						   ctxt->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not yet implemented. */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not yet implemented */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;
	struct desc_struct new_desc;
	u8 cpl = ctxt->ops->cpl(ctxt);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}
static int em_grp1a(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
}

static int em_grp2(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB(ctxt, "rol");
		break;
	case 1:	/* ror */
		emulate_2op_SrcB(ctxt, "ror");
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB(ctxt, "rcl");
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB(ctxt, "rcr");
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB(ctxt, "sal");
		break;
	case 5:	/* shr */
		emulate_2op_SrcB(ctxt, "shr");
		break;
	case 7:	/* sar */
		emulate_2op_SrcB(ctxt, "sar");
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_not(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ~ctxt->dst.val;
	return X86EMUL_CONTINUE;
}

static int em_neg(struct x86_emulate_ctxt *ctxt)
{
	emulate_1op(ctxt, "neg");
	return X86EMUL_CONTINUE;
}

static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 ex = 0;

	emulate_1op_rax_rdx(ctxt, "mul", ex);
	return X86EMUL_CONTINUE;
}

static int em_imul_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 ex = 0;

	emulate_1op_rax_rdx(ctxt, "imul", ex);
	return X86EMUL_CONTINUE;
}

static int em_div_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 de = 0;

	emulate_1op_rax_rdx(ctxt, "div", de);
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_idiv_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 de = 0;

	emulate_1op_rax_rdx(ctxt, "idiv", de);
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->modrm_reg) {
	case 0:	/* inc */
		emulate_1op(ctxt, "inc");
		break;
	case 1:	/* dec */
		emulate_1op(ctxt, "dec");
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = ctxt->_eip;
		rc = assign_eip_near(ctxt, ctxt->src.val);
		if (rc != X86EMUL_CONTINUE)
			break;
		ctxt->src.val = old_eip;
		rc = em_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		rc = assign_eip_near(ctxt, ctxt->src.val);
		break;
	case 5: /* jmp far */
		rc = em_jmp_far(ctxt);
		break;
	case 6:	/* push */
		rc = em_push(ctxt);
		break;
	}
	return rc;
}

static int em_grp9(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
		ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
			(u32) ctxt->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct new_desc;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, new_desc.l);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}
static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	u16 selector;

	memset(cs, 0, sizeof(struct desc_struct));
	ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	return ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)
		&& ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in long mode, so only become
	 * vendor-specific (via cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
		/*
		 * Intel ("GenuineIntel")
		 * Note: Intel CPUs only support "syscall" in 64-bit long
		 * mode, so a 64-bit guest running a 32-bit compat app will
		 * get #UD. While this behaviour could be fixed by emulating
		 * the AMD response, AMD CPUs cannot be made to behave like
		 * Intel ones.
		 */
		if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
		    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
		    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
			return false;

		/* AMD ("AuthenticAMD") */
		if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
		    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
		    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
			return true;

		/* AMD ("AMDisbetter!") */
		if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
		    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
		    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
			return true;
	}

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!em_syscall_is_enabled(ctxt))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}
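
/*
 * Example of the MSR_STAR selector arithmetic above: with STAR bits 47:32
 * set to 0x0010, msr_data >> 32 gives 0x0010, so cs_sel == 0x0010 and
 * ss_sel == 0x0018 (the next GDT entry), matching the SYSCALL convention
 * that SS immediately follows CS.
 */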
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/*
	 * XXX sysenter/sysexit have not been tested in 64-bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	ctxt->regs[VCPU_REGS_RSP] = (efer & EFER_LMA) ? msr_data :
							(u32)msr_data;

	return X86EMUL_CONTINUE;
}
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = ctxt->regs[VCPU_REGS_RCX];
	rdx = ctxt->regs[VCPU_REGS_RDX];

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (is_noncanonical_address(rcx) ||
		    is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	ctxt->regs[VCPU_REGS_RSP] = rcx;

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
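
/*
 * Worked example for the bitmap lookup above: an outw to port 0x3f9
 * (len == 2) reads the two permission bytes at io_bitmap_ptr + 0x7f and
 * tests bits 1..2 (bit_idx == 0x3f9 & 7 == 1, mask == 3); the access is
 * denied if either bit is set.
 */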
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = ctxt->regs[VCPU_REGS_RAX];
	tss->cx = ctxt->regs[VCPU_REGS_RCX];
	tss->dx = ctxt->regs[VCPU_REGS_RDX];
	tss->bx = ctxt->regs[VCPU_REGS_RBX];
	tss->sp = ctxt->regs[VCPU_REGS_RSP];
	tss->bp = ctxt->regs[VCPU_REGS_RBP];
	tss->si = ctxt->regs[VCPU_REGS_RSI];
	tss->di = ctxt->regs[VCPU_REGS_RDI];

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	ctxt->regs[VCPU_REGS_RAX] = tss->ax;
	ctxt->regs[VCPU_REGS_RCX] = tss->cx;
	ctxt->regs[VCPU_REGS_RDX] = tss->dx;
	ctxt->regs[VCPU_REGS_RBX] = tss->bx;
	ctxt->regs[VCPU_REGS_RSP] = tss->sp;
	ctxt->regs[VCPU_REGS_RBP] = tss->bp;
	ctxt->regs[VCPU_REGS_RSI] = tss->si;
	ctxt->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = ctxt->regs[VCPU_REGS_RAX];
	tss->ecx = ctxt->regs[VCPU_REGS_RCX];
	tss->edx = ctxt->regs[VCPU_REGS_RDX];
	tss->ebx = ctxt->regs[VCPU_REGS_RBX];
	tss->esp = ctxt->regs[VCPU_REGS_RSP];
	tss->ebp = ctxt->regs[VCPU_REGS_RBP];
	tss->esi = ctxt->regs[VCPU_REGS_RSI];
	tss->edi = ctxt->regs[VCPU_REGS_RDI];

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
2371 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2372 struct tss_segment_32 *tss)
2377 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2378 return emulate_gp(ctxt, 0);
2379 ctxt->_eip = tss->eip;
2380 ctxt->eflags = tss->eflags | 2;
2381 ctxt->regs[VCPU_REGS_RAX] = tss->eax;
2382 ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
2383 ctxt->regs[VCPU_REGS_RDX] = tss->edx;
2384 ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
2385 ctxt->regs[VCPU_REGS_RSP] = tss->esp;
2386 ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
2387 ctxt->regs[VCPU_REGS_RSI] = tss->esi;
2388 ctxt->regs[VCPU_REGS_RDI] = tss->edi;
2391 * SDM says that segment selectors are loaded before segment
2392 * descriptors. This is important because CPL checks will
2395 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2396 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2397 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2398 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2399 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2400 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2401 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2406 * Now load segment descriptors. If fault happenes at this stage
2407 * it is handled in a context of new task
2409 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2411 if (ret != X86EMUL_CONTINUE)
2413 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2415 if (ret != X86EMUL_CONTINUE)
2417 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2419 if (ret != X86EMUL_CONTINUE)
2421 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2423 if (ret != X86EMUL_CONTINUE)
2425 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2427 if (ret != X86EMUL_CONTINUE)
2429 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2431 if (ret != X86EMUL_CONTINUE)
2433 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2435 if (ret != X86EMUL_CONTINUE)
2438 return X86EMUL_CONTINUE;
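/*
 * Perform the memory side of a 32-bit task switch: read the old TSS,
 * refresh it with the current CPU state, fetch the new TSS, chain the
 * old selector into prev_task_link when the switch must be able to
 * IRET back, and finally load the incoming task's state.
 */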
2441 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2442 u16 tss_selector, u16 old_tss_sel,
2443 ulong old_tss_base, struct desc_struct *new_desc)
2445 struct x86_emulate_ops *ops = ctxt->ops;
2446 struct tss_segment_32 tss_seg;
2448 u32 new_tss_base = get_desc_base(new_desc);
2450 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2452 if (ret != X86EMUL_CONTINUE)
2453 /* FIXME: need to provide precise fault address */
2456 save_state_to_tss32(ctxt, &tss_seg);
2458 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2460 if (ret != X86EMUL_CONTINUE)
2461 /* FIXME: need to provide precise fault address */
2464 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2466 if (ret != X86EMUL_CONTINUE)
2467 /* FIXME: need to provide precise fault address */
2470 if (old_tss_sel != 0xffff) {
2471 tss_seg.prev_task_link = old_tss_sel;
2473 ret = ops->write_std(ctxt, new_tss_base,
2474 &tss_seg.prev_task_link,
2475 sizeof tss_seg.prev_task_link,
2477 if (ret != X86EMUL_CONTINUE)
2478 /* FIXME: need to provide precise fault address */
2482 return load_state_from_tss32(ctxt, &tss_seg);
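/*
 * Common task-switch entry: fetch the old and new TSS descriptors,
 * apply the DPL/RPL and TSS-limit checks required for everything but
 * IRET, maintain the descriptor busy bit and EFLAGS.NT according to
 * the switch reason, hand off to the 16- or 32-bit TSS routine based
 * on the descriptor type, and push the error code if the causing
 * fault delivered one.
 */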
2485 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2486 u16 tss_selector, int reason,
2487 bool has_error_code, u32 error_code)
2489 struct x86_emulate_ops *ops = ctxt->ops;
2490 struct desc_struct curr_tss_desc, next_tss_desc;
2492 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2493 ulong old_tss_base =
2494 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2497 /* FIXME: old_tss_base == ~0 ? */
2499 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2500 if (ret != X86EMUL_CONTINUE)
2502 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2503 if (ret != X86EMUL_CONTINUE)
2506 /* FIXME: check that next_tss_desc is tss */
2508 if (reason != TASK_SWITCH_IRET) {
2509 if ((tss_selector & 3) > next_tss_desc.dpl ||
2510 ops->cpl(ctxt) > next_tss_desc.dpl)
2511 return emulate_gp(ctxt, 0);
2514 desc_limit = desc_limit_scaled(&next_tss_desc);
2515 if (!next_tss_desc.p ||
2516 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2517 desc_limit < 0x2b)) {
2518 emulate_ts(ctxt, tss_selector & 0xfffc);
2519 return X86EMUL_PROPAGATE_FAULT;
2522 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2523 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2524 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2527 if (reason == TASK_SWITCH_IRET)
2528 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2530 /* Set the back link to the previous task only if the NT bit is set in
2531 eflags. Note that old_tss_sel is not used after this point. */
2532 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2533 old_tss_sel = 0xffff;
2535 if (next_tss_desc.type & 8)
2536 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2537 old_tss_base, &next_tss_desc);
2539 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2540 old_tss_base, &next_tss_desc);
2541 if (ret != X86EMUL_CONTINUE)
2544 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2545 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2547 if (reason != TASK_SWITCH_IRET) {
2548 next_tss_desc.type |= (1 << 1); /* set busy flag */
2549 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2552 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2553 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2555 if (has_error_code) {
2556 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2557 ctxt->lock_prefix = 0;
2558 ctxt->src.val = (unsigned long) error_code;
2559 ret = em_push(ctxt);
2565 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2566 u16 tss_selector, int reason,
2567 bool has_error_code, u32 error_code)
2571 ctxt->_eip = ctxt->eip;
2572 ctxt->dst.type = OP_NONE;
2574 rc = emulator_do_task_switch(ctxt, tss_selector, reason,
2575 has_error_code, error_code);
2577 if (rc == X86EMUL_CONTINUE)
2578 ctxt->eip = ctxt->_eip;
2580 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
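/*
 * Advance (or, with EFLAGS.DF set, retreat) a string operation's index
 * register and recompute the operand's effective address for the next
 * iteration.
 */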
2583 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2584 int reg, struct operand *op)
2586 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2588 register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
2589 op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
2590 op->addr.mem.seg = seg;
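/*
 * DAS: decimal adjust AL after subtraction. If AL's low nibble is
 * above 9 or AF is set, subtract 6 from AL; if AL was above 0x99 or
 * CF was set, subtract another 0x60. E.g. 0x23 - 0x05 = 0x1e is
 * adjusted to 0x18, the packed-BCD result of 23 - 5.
 */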
2593 static int em_das(struct x86_emulate_ctxt *ctxt)
2596 bool af, cf, old_cf;
2598 cf = ctxt->eflags & X86_EFLAGS_CF;
2604 af = ctxt->eflags & X86_EFLAGS_AF;
2605 if ((al & 0x0f) > 9 || af) {
2607 cf = old_cf | (al >= 250);
2612 if (old_al > 0x99 || old_cf) {
2618 /* Set PF, ZF, SF */
2619 ctxt->src.type = OP_IMM;
2621 ctxt->src.bytes = 1;
2622 emulate_2op_SrcV(ctxt, "or");
2623 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2625 ctxt->eflags |= X86_EFLAGS_CF;
2627 ctxt->eflags |= X86_EFLAGS_AF;
2628 return X86EMUL_CONTINUE;
2631 static int em_call(struct x86_emulate_ctxt *ctxt)
2634 long rel = ctxt->src.val;
2636 ctxt->src.val = (unsigned long)ctxt->_eip;
2637 rc = jmp_rel(ctxt, rel);
2638 if (rc != X86EMUL_CONTINUE)
2640 return em_push(ctxt);
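/*
 * Far CALL: validate and load the new CS for the current privilege
 * level, assign the target EIP, then push the old CS:EIP. On failure
 * the original CS descriptor is put back so the guest state stays
 * consistent.
 */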
2643 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2648 struct desc_struct old_desc, new_desc;
2649 const struct x86_emulate_ops *ops = ctxt->ops;
2650 int cpl = ctxt->ops->cpl(ctxt);
2652 old_eip = ctxt->_eip;
2653 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
2655 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2656 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2658 if (rc != X86EMUL_CONTINUE)
2659 return rc;
2661 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2662 if (rc != X86EMUL_CONTINUE)
2665 ctxt->src.val = old_cs;
2667 if (rc != X86EMUL_CONTINUE)
2670 ctxt->src.val = old_eip;
2672 /* If we failed, we tainted the memory, but at the very least we should restore registers to their original state. */
2674 if (rc != X86EMUL_CONTINUE)
2678 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2683 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2688 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2689 if (rc != X86EMUL_CONTINUE)
2691 rc = assign_eip_near(ctxt, eip);
2692 if (rc != X86EMUL_CONTINUE)
2694 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
2695 return X86EMUL_CONTINUE;
2698 static int em_add(struct x86_emulate_ctxt *ctxt)
2700 emulate_2op_SrcV(ctxt, "add");
2701 return X86EMUL_CONTINUE;
2704 static int em_or(struct x86_emulate_ctxt *ctxt)
2706 emulate_2op_SrcV(ctxt, "or");
2707 return X86EMUL_CONTINUE;
2710 static int em_adc(struct x86_emulate_ctxt *ctxt)
2712 emulate_2op_SrcV(ctxt, "adc");
2713 return X86EMUL_CONTINUE;
2716 static int em_sbb(struct x86_emulate_ctxt *ctxt)
2718 emulate_2op_SrcV(ctxt, "sbb");
2719 return X86EMUL_CONTINUE;
2722 static int em_and(struct x86_emulate_ctxt *ctxt)
2724 emulate_2op_SrcV(ctxt, "and");
2725 return X86EMUL_CONTINUE;
2728 static int em_sub(struct x86_emulate_ctxt *ctxt)
2730 emulate_2op_SrcV(ctxt, "sub");
2731 return X86EMUL_CONTINUE;
2734 static int em_xor(struct x86_emulate_ctxt *ctxt)
2736 emulate_2op_SrcV(ctxt, "xor");
2737 return X86EMUL_CONTINUE;
2740 static int em_cmp(struct x86_emulate_ctxt *ctxt)
2742 emulate_2op_SrcV(ctxt, "cmp");
2743 /* Disable writeback. */
2744 ctxt->dst.type = OP_NONE;
2745 return X86EMUL_CONTINUE;
2748 static int em_test(struct x86_emulate_ctxt *ctxt)
2750 emulate_2op_SrcV(ctxt, "test");
2751 /* Disable writeback. */
2752 ctxt->dst.type = OP_NONE;
2753 return X86EMUL_CONTINUE;
2756 static int em_xchg(struct x86_emulate_ctxt *ctxt)
2758 /* Write back the register source. */
2759 ctxt->src.val = ctxt->dst.val;
2760 write_register_operand(&ctxt->src);
2762 /* Write back the memory destination with implicit LOCK prefix. */
2763 ctxt->dst.val = ctxt->src.orig_val;
2764 ctxt->lock_prefix = 1;
2765 return X86EMUL_CONTINUE;
2768 static int em_imul(struct x86_emulate_ctxt *ctxt)
2770 emulate_2op_SrcV_nobyte(ctxt, "imul");
2771 return X86EMUL_CONTINUE;
2774 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2776 ctxt->dst.val = ctxt->src2.val;
2777 return em_imul(ctxt);
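/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit throughout rDX.
 * src.val >> (bits - 1) is 0 or 1, so ~((src.val >> (bits - 1)) - 1)
 * evaluates to all-zeros for a non-negative source and all-ones for
 * a negative one.
 */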
2780 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2782 ctxt->dst.type = OP_REG;
2783 ctxt->dst.bytes = ctxt->src.bytes;
2784 ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
2785 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2787 return X86EMUL_CONTINUE;
2790 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2794 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2795 ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
2796 ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
2797 return X86EMUL_CONTINUE;
2800 static int em_mov(struct x86_emulate_ctxt *ctxt)
2802 ctxt->dst.val = ctxt->src.val;
2803 return X86EMUL_CONTINUE;
2806 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
2808 if (ctxt->modrm_reg > VCPU_SREG_GS)
2809 return emulate_ud(ctxt);
2811 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
2812 return X86EMUL_CONTINUE;
2815 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
2817 u16 sel = ctxt->src.val;
2819 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
2820 return emulate_ud(ctxt);
2822 if (ctxt->modrm_reg == VCPU_SREG_SS)
2823 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2825 /* Disable writeback. */
2826 ctxt->dst.type = OP_NONE;
2827 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
2830 static int em_movdqu(struct x86_emulate_ctxt *ctxt)
2832 memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
2833 return X86EMUL_CONTINUE;
2836 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
2841 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
2842 if (rc == X86EMUL_CONTINUE)
2843 ctxt->ops->invlpg(ctxt, linear);
2844 /* Disable writeback. */
2845 ctxt->dst.type = OP_NONE;
2846 return X86EMUL_CONTINUE;
2849 static int em_clts(struct x86_emulate_ctxt *ctxt)
2853 cr0 = ctxt->ops->get_cr(ctxt, 0);
2855 ctxt->ops->set_cr(ctxt, 0, cr0);
2856 return X86EMUL_CONTINUE;
2859 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
2863 if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
2864 return X86EMUL_UNHANDLEABLE;
2866 rc = ctxt->ops->fix_hypercall(ctxt);
2867 if (rc != X86EMUL_CONTINUE)
2870 /* Let the processor re-execute the fixed hypercall */
2871 ctxt->_eip = ctxt->eip;
2872 /* Disable writeback. */
2873 ctxt->dst.type = OP_NONE;
2874 return X86EMUL_CONTINUE;
2877 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
2879 struct desc_ptr desc_ptr;
2882 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2883 &desc_ptr.size, &desc_ptr.address,
2885 if (rc != X86EMUL_CONTINUE)
2887 ctxt->ops->set_gdt(ctxt, &desc_ptr);
2888 /* Disable writeback. */
2889 ctxt->dst.type = OP_NONE;
2890 return X86EMUL_CONTINUE;
2893 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
2897 rc = ctxt->ops->fix_hypercall(ctxt);
2899 /* Disable writeback. */
2900 ctxt->dst.type = OP_NONE;
2904 static int em_lidt(struct x86_emulate_ctxt *ctxt)
2906 struct desc_ptr desc_ptr;
2909 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2910 &desc_ptr.size, &desc_ptr.address,
2912 if (rc != X86EMUL_CONTINUE)
2914 ctxt->ops->set_idt(ctxt, &desc_ptr);
2915 /* Disable writeback. */
2916 ctxt->dst.type = OP_NONE;
2917 return X86EMUL_CONTINUE;
2920 static int em_smsw(struct x86_emulate_ctxt *ctxt)
2922 ctxt->dst.bytes = 2;
2923 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
2924 return X86EMUL_CONTINUE;
2927 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
2929 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
2930 | (ctxt->src.val & 0x0f));
2931 ctxt->dst.type = OP_NONE;
2932 return X86EMUL_CONTINUE;
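/*
 * LOOP/LOOPE/LOOPNE (0xe2/0xe1/0xe0): decrement rCX and branch while
 * it is non-zero. For 0xe0/0xe1 the ZF condition is borrowed from the
 * Jcc table: xoring the opcode with 5 yields a value whose low nibble
 * (which is all test_cc looks at) selects "ZF clear" for LOOPNE and
 * "ZF set" for LOOPE.
 */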
2935 static int em_loop(struct x86_emulate_ctxt *ctxt)
2937 int rc = X86EMUL_CONTINUE;
2939 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
2940 if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
2941 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
2942 rc = jmp_rel(ctxt, ctxt->src.val);
2947 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
2949 int rc = X86EMUL_CONTINUE;
2951 if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
2952 rc = jmp_rel(ctxt, ctxt->src.val);
2957 static int em_cli(struct x86_emulate_ctxt *ctxt)
2959 if (emulator_bad_iopl(ctxt))
2960 return emulate_gp(ctxt, 0);
2962 ctxt->eflags &= ~X86_EFLAGS_IF;
2963 return X86EMUL_CONTINUE;
2966 static int em_sti(struct x86_emulate_ctxt *ctxt)
2968 if (emulator_bad_iopl(ctxt))
2969 return emulate_gp(ctxt, 0);
2971 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
2972 ctxt->eflags |= X86_EFLAGS_IF;
2973 return X86EMUL_CONTINUE;
2976 static bool valid_cr(int nr)
2988 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
2990 if (!valid_cr(ctxt->modrm_reg))
2991 return emulate_ud(ctxt);
2993 return X86EMUL_CONTINUE;
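/*
 * Vet a CR write before it is applied: reject reserved bits, the
 * PG-without-PE and NW-without-CD combinations in CR0, enabling
 * paging in long mode without CR4.PAE, invalid CR3 bits for the
 * active paging mode, and clearing CR4.PAE while long mode is active.
 */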
2996 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
2998 u64 new_val = ctxt->src.val64;
2999 int cr = ctxt->modrm_reg;
3002 static u64 cr_reserved_bits[] = {
3003 0xffffffff00000000ULL,
3004 0, 0, 0, /* CR3 checked later */
3011 return emulate_ud(ctxt);
3013 if (new_val & cr_reserved_bits[cr])
3014 return emulate_gp(ctxt, 0);
3019 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3020 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3021 return emulate_gp(ctxt, 0);
3023 cr4 = ctxt->ops->get_cr(ctxt, 4);
3024 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3026 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3027 !(cr4 & X86_CR4_PAE))
3028 return emulate_gp(ctxt, 0);
3035 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3036 if (efer & EFER_LMA)
3037 rsvd = CR3_L_MODE_RESERVED_BITS;
3038 else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
3039 rsvd = CR3_PAE_RESERVED_BITS;
3040 else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
3041 rsvd = CR3_NONPAE_RESERVED_BITS;
3044 return emulate_gp(ctxt, 0);
3051 cr4 = ctxt->ops->get_cr(ctxt, 4);
3052 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3054 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3055 return emulate_gp(ctxt, 0);
3061 return X86EMUL_CONTINUE;
3064 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3068 ctxt->ops->get_dr(ctxt, 7, &dr7);
3070 /* Check if DR7.GD (general detect, bit 13) is set */
3071 return dr7 & (1 << 13);
3074 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3076 int dr = ctxt->modrm_reg;
3080 return emulate_ud(ctxt);
3082 cr4 = ctxt->ops->get_cr(ctxt, 4);
3083 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3084 return emulate_ud(ctxt);
3086 if (check_dr7_gd(ctxt))
3087 return emulate_db(ctxt);
3089 return X86EMUL_CONTINUE;
3092 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3094 u64 new_val = ctxt->src.val64;
3095 int dr = ctxt->modrm_reg;
3097 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3098 return emulate_gp(ctxt, 0);
3100 return check_dr_read(ctxt);
3103 static int check_svme(struct x86_emulate_ctxt *ctxt)
3107 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3109 if (!(efer & EFER_SVME))
3110 return emulate_ud(ctxt);
3112 return X86EMUL_CONTINUE;
3115 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3117 u64 rax = ctxt->regs[VCPU_REGS_RAX];
3119 /* Valid physical address? */
3120 if (rax & 0xffff000000000000ULL)
3121 return emulate_gp(ctxt, 0);
3123 return check_svme(ctxt);
3126 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3128 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3130 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3131 return emulate_ud(ctxt);
3133 return X86EMUL_CONTINUE;
3136 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3138 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3139 u64 rcx = ctxt->regs[VCPU_REGS_RCX];
3141 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3143 return emulate_gp(ctxt, 0);
3145 return X86EMUL_CONTINUE;
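/*
 * IN/OUT permission checks: clamp the access width to 4 bytes and let
 * emulator_io_permited() consult IOPL and the TSS I/O permission
 * bitmap for the port range.
 */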
3148 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3150 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3151 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3152 return emulate_gp(ctxt, 0);
3154 return X86EMUL_CONTINUE;
3157 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3159 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3160 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3161 return emulate_gp(ctxt, 0);
3163 return X86EMUL_CONTINUE;
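/*
 * Constructors for opcode-table entries: D() carries decode flags
 * only, I() adds an execute callback, DI()/II() attach an intercept,
 * the trailing-P variants add a permission check, and G/GD/EXT/GP
 * redirect through group, group-dual, ModRM.rm-extension and
 * mandatory-prefix sub-tables. D2bv/I2bv emit byte+word variants and
 * I6ALU the six classic encodings of one ALU operation.
 */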
3166 #define D(_y) { .flags = (_y) }
3167 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
3168 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
3169 .check_perm = (_p) }
3171 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3172 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
3173 #define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
3174 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3175 #define II(_f, _e, _i) \
3176 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
3177 #define IIP(_f, _e, _i, _p) \
3178 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
3179 .check_perm = (_p) }
3180 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3182 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3183 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3184 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3186 #define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3187 I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3188 I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3190 static struct opcode group7_rm1[] = {
3191 DI(SrcNone | ModRM | Priv, monitor),
3192 DI(SrcNone | ModRM | Priv, mwait),
3196 static struct opcode group7_rm3[] = {
3197 DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
3198 II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
3199 DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
3200 DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
3201 DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
3202 DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
3203 DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
3204 DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
3207 static struct opcode group7_rm7[] = {
3209 DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
3213 static struct opcode group1[] = {
3224 static struct opcode group1A[] = {
3225 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
3228 static struct opcode group3[] = {
3229 I(DstMem | SrcImm | ModRM, em_test),
3230 I(DstMem | SrcImm | ModRM, em_test),
3231 I(DstMem | SrcNone | ModRM | Lock, em_not),
3232 I(DstMem | SrcNone | ModRM | Lock, em_neg),
3233 I(SrcMem | ModRM, em_mul_ex),
3234 I(SrcMem | ModRM, em_imul_ex),
3235 I(SrcMem | ModRM, em_div_ex),
3236 I(SrcMem | ModRM, em_idiv_ex),
3239 static struct opcode group4[] = {
3240 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
3244 static struct opcode group5[] = {
3245 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
3246 D(SrcMem | ModRM | Stack),
3247 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
3248 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
3249 D(SrcMem | ModRM | Stack), N,
3252 static struct opcode group6[] = {
3253 DI(ModRM | Prot, sldt),
3254 DI(ModRM | Prot, str),
3255 DI(ModRM | Prot | Priv, lldt),
3256 DI(ModRM | Prot | Priv, ltr),
3260 static struct group_dual group7 = { {
3261 DI(ModRM | Mov | DstMem | Priv, sgdt),
3262 DI(ModRM | Mov | DstMem | Priv, sidt),
3263 II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
3264 II(ModRM | SrcMem | Priv, em_lidt, lidt),
3265 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3266 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
3267 II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3269 I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
3271 N, EXT(0, group7_rm3),
3272 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3273 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
3276 static struct opcode group8[] = {
3278 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
3279 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
3282 static struct group_dual group9 = { {
3283 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
3285 N, N, N, N, N, N, N, N,
3288 static struct opcode group11[] = {
3289 I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
3292 static struct gprefix pfx_0f_6f_0f_7f = {
3293 N, N, N, I(Sse, em_movdqu),
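/*
 * One-byte opcode map, indexed by the opcode itself. N marks opcodes
 * the emulator cannot decode (the lookup yields an empty entry and
 * decode fails), and the Xn repeat macros replicate one recipe across
 * a range of opcodes.
 */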
3296 static struct opcode opcode_table[256] = {
3298 I6ALU(Lock, em_add),
3299 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3300 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3303 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3306 I6ALU(Lock, em_adc),
3307 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3308 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3310 I6ALU(Lock, em_sbb),
3311 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3312 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3314 I6ALU(Lock, em_and), N, N,
3316 I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3318 I6ALU(Lock, em_xor), N, N,
3320 I6ALU(0, em_cmp), N, N,
3324 X8(I(SrcReg | Stack, em_push)),
3326 X8(I(DstReg | Stack, em_pop)),
3328 I(ImplicitOps | Stack | No64, em_pusha),
3329 I(ImplicitOps | Stack | No64, em_popa),
3330 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3333 I(SrcImm | Mov | Stack, em_push),
3334 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3335 I(SrcImmByte | Mov | Stack, em_push),
3336 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3337 D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
3338 D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
3342 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
3343 G(DstMem | SrcImm | ModRM | Group, group1),
3344 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
3345 G(DstMem | SrcImmByte | ModRM | Group, group1),
3346 I2bv(DstMem | SrcReg | ModRM, em_test),
3347 I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
3349 I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
3350 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3351 I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
3352 D(ModRM | SrcMem | NoAccess | DstReg),
3353 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3356 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3358 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3359 I(SrcImmFAddr | No64, em_call_far), N,
3360 II(ImplicitOps | Stack, em_pushf, pushf),
3361 II(ImplicitOps | Stack, em_popf, popf), N, N,
3363 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3364 I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
3365 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3366 I2bv(SrcSI | DstDI | String, em_cmp),
3368 I2bv(DstAcc | SrcImm, em_test),
3369 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3370 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3371 I2bv(SrcAcc | DstDI | String, em_cmp),
3373 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3375 X8(I(DstReg | SrcImm | Mov, em_mov)),
3377 D2bv(DstMem | SrcImmByte | ModRM),
3378 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3379 I(ImplicitOps | Stack, em_ret),
3380 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
3381 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
3382 G(ByteOp, group11), G(0, group11),
3384 N, N, N, I(ImplicitOps | Stack, em_ret_far),
3385 D(ImplicitOps), DI(SrcImmByte, intn),
3386 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3388 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
3391 N, N, N, N, N, N, N, N,
3393 X3(I(SrcImmByte, em_loop)),
3394 I(SrcImmByte, em_jcxz),
3395 D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in),
3396 D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
3398 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
3399 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
3400 D2bvIP(SrcDX | DstAcc, in, check_perm_in),
3401 D2bvIP(SrcAcc | DstDX, out, check_perm_out),
3403 N, DI(ImplicitOps, icebp), N, N,
3404 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3405 G(ByteOp, group3), G(0, group3),
3407 D(ImplicitOps), D(ImplicitOps),
3408 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
3409 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
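/* Two-byte (0x0f-escaped) opcode map, indexed by the second byte. */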
3412 static struct opcode twobyte_table[256] = {
3414 G(0, group6), GD(0, &group7), N, N,
3415 N, I(ImplicitOps | VendorSpecific, em_syscall),
3416 II(ImplicitOps | Priv, em_clts, clts), N,
3417 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3418 N, D(ImplicitOps | ModRM), N, N,
3420 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
3422 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3423 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3424 DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
3425 DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
3427 N, N, N, N, N, N, N, N,
3429 DI(ImplicitOps | Priv, wrmsr),
3430 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3431 DI(ImplicitOps | Priv, rdmsr),
3432 DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
3433 I(ImplicitOps | VendorSpecific, em_sysenter),
3434 I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
3436 N, N, N, N, N, N, N, N,
3438 X16(D(DstReg | SrcMem | ModRM | Mov)),
3440 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3445 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3450 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3454 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
3456 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
3457 DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
3458 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3459 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
3461 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
3462 DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3463 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3464 D(DstMem | SrcReg | Src2CL | ModRM),
3465 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
3467 D2bv(DstMem | SrcReg | ModRM | Lock),
3468 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
3469 D(DstMem | SrcReg | ModRM | BitOp | Lock),
3470 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
3471 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
3472 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3475 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3476 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
3477 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3479 D2bv(DstMem | SrcReg | ModRM | Lock),
3480 N, D(DstMem | SrcReg | ModRM | Mov),
3481 N, N, N, GD(0, &group9),
3482 N, N, N, N, N, N, N, N,
3484 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3486 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3488 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3504 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
3508 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3514 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3515 unsigned size, bool sign_extension)
3517 int rc = X86EMUL_CONTINUE;
3521 op->addr.mem.ea = ctxt->_eip;
3522 /* NB. Immediates are sign-extended as necessary. */
3523 switch (op->bytes) {
3525 op->val = insn_fetch(s8, ctxt);
3528 op->val = insn_fetch(s16, ctxt);
3531 op->val = insn_fetch(s32, ctxt);
3534 if (!sign_extension) {
3535 switch (op->bytes) {
3543 op->val &= 0xffffffff;
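/*
 * Decode one operand according to the OpXXX selector pulled out of
 * the opcode flags, fetching register, memory address or immediate
 * contents into *op as a side effect.
 */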
3551 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
3554 int rc = X86EMUL_CONTINUE;
3558 decode_register_operand(ctxt, op,
3560 ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7));
3563 rc = decode_imm(ctxt, op, 1, false);
3566 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3570 if ((ctxt->d & BitOp) && op == &ctxt->dst)
3571 fetch_bit_operand(ctxt);
3572 op->orig_val = op->val;
3575 ctxt->memop.bytes = 8;
3579 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3580 op->addr.reg = &ctxt->regs[VCPU_REGS_RAX];
3581 fetch_register_operand(op);
3582 op->orig_val = op->val;
3586 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3588 register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
3589 op->addr.mem.seg = VCPU_SREG_ES;
3595 op->addr.reg = &ctxt->regs[VCPU_REGS_RDX];
3596 fetch_register_operand(op);
3600 op->val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
3603 rc = decode_imm(ctxt, op, 1, true);
3610 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
3613 ctxt->memop.bytes = 2;
3616 ctxt->memop.bytes = 4;
3619 rc = decode_imm(ctxt, op, 2, false);
3622 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
3626 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3628 register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
3629 op->addr.mem.seg = seg_override(ctxt);
3634 op->addr.mem.ea = ctxt->_eip;
3635 op->bytes = ctxt->op_bytes + 2;
3636 insn_fetch_arr(op->valptr, op->bytes, ctxt);
3639 ctxt->memop.bytes = ctxt->op_bytes + 2;
3642 op->val = VCPU_SREG_ES;
3645 op->val = VCPU_SREG_CS;
3648 op->val = VCPU_SREG_SS;
3651 op->val = VCPU_SREG_DS;
3654 op->val = VCPU_SREG_FS;
3657 op->val = VCPU_SREG_GS;
3660 /* Special instructions do their own operand decoding. */
3662 op->type = OP_NONE; /* Disable writeback. */
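/*
 * Main decode entry point: consume legacy and REX prefixes, look up
 * the opcode in the one- or two-byte table, follow any group, dual or
 * mandatory-prefix indirection, then decode ModRM/SIB and the source,
 * second source and destination operands. No instruction is executed
 * here.
 */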
3670 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
3672 int rc = X86EMUL_CONTINUE;
3673 int mode = ctxt->mode;
3674 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
3675 bool op_prefix = false;
3676 struct opcode opcode;
3678 ctxt->memop.type = OP_NONE;
3679 ctxt->memopp = NULL;
3680 ctxt->_eip = ctxt->eip;
3681 ctxt->fetch.start = ctxt->_eip;
3682 ctxt->fetch.end = ctxt->fetch.start + insn_len;
3684 memcpy(ctxt->fetch.data, insn, insn_len);
3687 case X86EMUL_MODE_REAL:
3688 case X86EMUL_MODE_VM86:
3689 case X86EMUL_MODE_PROT16:
3690 def_op_bytes = def_ad_bytes = 2;
3692 case X86EMUL_MODE_PROT32:
3693 def_op_bytes = def_ad_bytes = 4;
3695 #ifdef CONFIG_X86_64
3696 case X86EMUL_MODE_PROT64:
3702 return EMULATION_FAILED;
3705 ctxt->op_bytes = def_op_bytes;
3706 ctxt->ad_bytes = def_ad_bytes;
3708 /* Legacy prefixes. */
3710 switch (ctxt->b = insn_fetch(u8, ctxt)) {
3711 case 0x66: /* operand-size override */
3713 /* switch between 2/4 bytes */
3714 ctxt->op_bytes = def_op_bytes ^ 6;
3716 case 0x67: /* address-size override */
3717 if (mode == X86EMUL_MODE_PROT64)
3718 /* switch between 4/8 bytes */
3719 ctxt->ad_bytes = def_ad_bytes ^ 12;
3721 /* switch between 2/4 bytes */
3722 ctxt->ad_bytes = def_ad_bytes ^ 6;
3724 case 0x26: /* ES override */
3725 case 0x2e: /* CS override */
3726 case 0x36: /* SS override */
3727 case 0x3e: /* DS override */
3728 set_seg_override(ctxt, (ctxt->b >> 3) & 3);
3730 case 0x64: /* FS override */
3731 case 0x65: /* GS override */
3732 set_seg_override(ctxt, ctxt->b & 7);
3734 case 0x40 ... 0x4f: /* REX */
3735 if (mode != X86EMUL_MODE_PROT64)
3737 ctxt->rex_prefix = ctxt->b;
3739 case 0xf0: /* LOCK */
3740 ctxt->lock_prefix = 1;
3742 case 0xf2: /* REPNE/REPNZ */
3743 case 0xf3: /* REP/REPE/REPZ */
3744 ctxt->rep_prefix = ctxt->b;
3750 /* Any legacy prefix after a REX prefix nullifies its effect. */
3752 ctxt->rex_prefix = 0;
3758 if (ctxt->rex_prefix & 8)
3759 ctxt->op_bytes = 8; /* REX.W */
3761 /* Opcode byte(s). */
3762 opcode = opcode_table[ctxt->b];
3763 /* Two-byte opcode? */
3764 if (ctxt->b == 0x0f) {
3766 ctxt->b = insn_fetch(u8, ctxt);
3767 opcode = twobyte_table[ctxt->b];
3769 ctxt->d = opcode.flags;
3771 while (ctxt->d & GroupMask) {
3772 switch (ctxt->d & GroupMask) {
3774 ctxt->modrm = insn_fetch(u8, ctxt);
3776 goffset = (ctxt->modrm >> 3) & 7;
3777 opcode = opcode.u.group[goffset];
3780 ctxt->modrm = insn_fetch(u8, ctxt);
3782 goffset = (ctxt->modrm >> 3) & 7;
3783 if ((ctxt->modrm >> 6) == 3)
3784 opcode = opcode.u.gdual->mod3[goffset];
3786 opcode = opcode.u.gdual->mod012[goffset];
3789 goffset = ctxt->modrm & 7;
3790 opcode = opcode.u.group[goffset];
3793 if (ctxt->rep_prefix && op_prefix)
3794 return EMULATION_FAILED;
3795 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
3796 switch (simd_prefix) {
3797 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
3798 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
3799 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
3800 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
3804 return EMULATION_FAILED;
3807 ctxt->d &= ~(u64)GroupMask;
3808 ctxt->d |= opcode.flags;
3811 ctxt->execute = opcode.u.execute;
3812 ctxt->check_perm = opcode.check_perm;
3813 ctxt->intercept = opcode.intercept;
3816 if (ctxt->d == 0 || (ctxt->d & Undefined))
3817 return EMULATION_FAILED;
3819 if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
3820 return EMULATION_FAILED;
3822 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
3825 if (ctxt->d & Op3264) {
3826 if (mode == X86EMUL_MODE_PROT64)
3833 ctxt->op_bytes = 16;
3835 /* ModRM and SIB bytes. */
3836 if (ctxt->d & ModRM) {
3837 rc = decode_modrm(ctxt, &ctxt->memop);
3838 if (!ctxt->has_seg_override)
3839 set_seg_override(ctxt, ctxt->modrm_seg);
3840 } else if (ctxt->d & MemAbs)
3841 rc = decode_abs(ctxt, &ctxt->memop);
3842 if (rc != X86EMUL_CONTINUE)
3845 if (!ctxt->has_seg_override)
3846 set_seg_override(ctxt, VCPU_SREG_DS);
3848 ctxt->memop.addr.mem.seg = seg_override(ctxt);
3850 if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
3851 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
3854 * Decode and fetch the source operand: register, memory
3857 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
3858 if (rc != X86EMUL_CONTINUE)
3862 * Decode and fetch the second source operand: register, memory
3865 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
3866 if (rc != X86EMUL_CONTINUE)
3869 /* Decode and fetch the destination operand: register or memory. */
3870 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
3873 if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
3874 ctxt->memopp->addr.mem.ea += ctxt->_eip;
3876 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
3879 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3881 /* The second termination condition only applies to REPE
3882 * and REPNE. Test whether the repeat string operation prefix is
3883 * REPE/REPZ or REPNE/REPNZ and, if so, check the
3884 * corresponding termination condition:
3885 * - if REPE/REPZ and ZF = 0 then done
3886 * - if REPNE/REPNZ and ZF = 1 then done
3888 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
3889 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
3890 && (((ctxt->rep_prefix == REPE_PREFIX) &&
3891 ((ctxt->eflags & EFLG_ZF) == 0))
3892 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
3893 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
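/*
 * Execute an already-decoded instruction: run mode, privilege and
 * intercept checks, fault in memory operands, dispatch through the
 * opcode's ->execute callback or the switch below, then write back
 * results and advance (or restart, for string ops) the instruction
 * pointer.
 */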
3899 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3901 struct x86_emulate_ops *ops = ctxt->ops;
3903 int rc = X86EMUL_CONTINUE;
3904 int saved_dst_type = ctxt->dst.type;
3906 ctxt->mem_read.pos = 0;
3908 if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
3909 rc = emulate_ud(ctxt);
3913 /* LOCK prefix is allowed only with some instructions */
3914 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
3915 rc = emulate_ud(ctxt);
3919 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
3920 rc = emulate_ud(ctxt);
3925 && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
3926 || !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
3927 rc = emulate_ud(ctxt);
3931 if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
3932 rc = emulate_nm(ctxt);
3936 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3937 rc = emulator_check_intercept(ctxt, ctxt->intercept,
3938 X86_ICPT_PRE_EXCEPT);
3939 if (rc != X86EMUL_CONTINUE)
3943 /* Privileged instructions can be executed only at CPL 0 */
3944 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
3945 rc = emulate_gp(ctxt, 0);
3949 /* Instruction can only be executed in protected mode */
3950 if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
3951 rc = emulate_ud(ctxt);
3955 /* Do instruction specific permission checks */
3956 if (ctxt->check_perm) {
3957 rc = ctxt->check_perm(ctxt);
3958 if (rc != X86EMUL_CONTINUE)
3962 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3963 rc = emulator_check_intercept(ctxt, ctxt->intercept,
3964 X86_ICPT_POST_EXCEPT);
3965 if (rc != X86EMUL_CONTINUE)
3969 if (ctxt->rep_prefix && (ctxt->d & String)) {
3970 /* All REP prefixes have the same first termination condition */
3971 if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
3972 ctxt->eip = ctxt->_eip;
3977 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
3978 rc = segmented_read(ctxt, ctxt->src.addr.mem,
3979 ctxt->src.valptr, ctxt->src.bytes);
3980 if (rc != X86EMUL_CONTINUE)
3982 ctxt->src.orig_val64 = ctxt->src.val64;
3985 if (ctxt->src2.type == OP_MEM) {
3986 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
3987 &ctxt->src2.val, ctxt->src2.bytes);
3988 if (rc != X86EMUL_CONTINUE)
3992 if ((ctxt->d & DstMask) == ImplicitOps)
3996 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
3997 /* optimisation - avoid slow emulated read if Mov */
3998 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
3999 &ctxt->dst.val, ctxt->dst.bytes);
4000 if (rc != X86EMUL_CONTINUE)
4003 /* Copy full 64-bit value for CMPXCHG8B. */
4004 ctxt->dst.orig_val64 = ctxt->dst.val64;
4008 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
4009 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4010 X86_ICPT_POST_MEMACCESS);
4011 if (rc != X86EMUL_CONTINUE)
4015 if (ctxt->execute) {
4016 rc = ctxt->execute(ctxt);
4017 if (rc != X86EMUL_CONTINUE)
4026 case 0x40 ... 0x47: /* inc r16/r32 */
4027 emulate_1op(ctxt, "inc");
4029 case 0x48 ... 0x4f: /* dec r16/r32 */
4030 emulate_1op(ctxt, "dec");
4032 case 0x63: /* movsxd */
4033 if (ctxt->mode != X86EMUL_MODE_PROT64)
4034 goto cannot_emulate;
4035 ctxt->dst.val = (s32) ctxt->src.val;
4037 case 0x6c: /* insb */
4038 case 0x6d: /* insw/insd */
4039 ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
4041 case 0x6e: /* outsb */
4042 case 0x6f: /* outsw/outsd */
4043 ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
4046 case 0x70 ... 0x7f: /* jcc (short) */
4047 if (test_cc(ctxt->b, ctxt->eflags))
4048 rc = jmp_rel(ctxt, ctxt->src.val);
4050 case 0x8d: /* lea r16/r32, m */
4051 ctxt->dst.val = ctxt->src.addr.mem.ea;
4053 case 0x8f: /* pop (sole member of Grp1a) */
4054 rc = em_grp1a(ctxt);
4056 case 0x90 ... 0x97: /* nop / xchg reg, rax */
4057 if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
4061 case 0x98: /* cbw/cwde/cdqe */
4062 switch (ctxt->op_bytes) {
4063 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4064 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4065 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
4071 case 0xcc: /* int3 */
4072 rc = emulate_int(ctxt, 3);
4074 case 0xcd: /* int n */
4075 rc = emulate_int(ctxt, ctxt->src.val);
4077 case 0xce: /* into */
4078 if (ctxt->eflags & EFLG_OF)
4079 rc = emulate_int(ctxt, 4);
4081 case 0xd0 ... 0xd1: /* Grp2 */
4084 case 0xd2 ... 0xd3: /* Grp2 */
4085 ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
4088 case 0xe4: /* inb */
4091 case 0xe6: /* outb */
4092 case 0xe7: /* out */
4094 case 0xe9: /* jmp rel */
4095 case 0xeb: /* jmp rel short */
4096 rc = jmp_rel(ctxt, ctxt->src.val);
4097 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4099 case 0xec: /* in al,dx */
4100 case 0xed: /* in (e/r)ax,dx */
4102 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
4104 goto done; /* IO is needed */
4106 case 0xee: /* out dx,al */
4107 case 0xef: /* out dx,(e/r)ax */
4109 ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
4111 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4113 case 0xf4: /* hlt */
4114 ctxt->ops->halt(ctxt);
4116 case 0xf5: /* cmc */
4117 /* complement the carry flag in EFLAGS */
4118 ctxt->eflags ^= EFLG_CF;
4120 case 0xf8: /* clc */
4121 ctxt->eflags &= ~EFLG_CF;
4123 case 0xf9: /* stc */
4124 ctxt->eflags |= EFLG_CF;
4126 case 0xfc: /* cld */
4127 ctxt->eflags &= ~EFLG_DF;
4129 case 0xfd: /* std */
4130 ctxt->eflags |= EFLG_DF;
4132 case 0xfe: /* Grp4 */
4133 rc = em_grp45(ctxt);
4135 case 0xff: /* Grp5 */
4136 rc = em_grp45(ctxt);
4139 goto cannot_emulate;
4142 if (rc != X86EMUL_CONTINUE)
4146 rc = writeback(ctxt);
4147 if (rc != X86EMUL_CONTINUE)
4151 * restore dst type in case the decoding will be reused
4152 * (happens for string instructions)
4154 ctxt->dst.type = saved_dst_type;
4156 if ((ctxt->d & SrcMask) == SrcSI)
4157 string_addr_inc(ctxt, seg_override(ctxt),
4158 VCPU_REGS_RSI, &ctxt->src);
4160 if ((ctxt->d & DstMask) == DstDI)
4161 string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
4164 if (ctxt->rep_prefix && (ctxt->d & String)) {
4165 struct read_cache *r = &ctxt->io_read;
4166 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
4168 if (!string_insn_completed(ctxt)) {
4170 * Re-enter guest when pio read ahead buffer is empty
4171 * or, if it is not used, after every 1024 iterations.
4173 if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
4174 (r->end == 0 || r->end != r->pos)) {
4176 * Reset the read cache. This usually happens before
4177 * decode, but since the instruction is restarted
4178 * we have to do it here.
4180 ctxt->mem_read.end = 0;
4181 return EMULATION_RESTART;
4183 goto done; /* skip rip writeback */
4187 ctxt->eip = ctxt->_eip;
4190 if (rc == X86EMUL_PROPAGATE_FAULT)
4191 ctxt->have_exception = true;
4192 if (rc == X86EMUL_INTERCEPTED)
4193 return EMULATION_INTERCEPTED;
4195 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
4199 case 0x09: /* wbinvd */
4200 (ctxt->ops->wbinvd)(ctxt);
4202 case 0x08: /* invd */
4203 case 0x0d: /* GrpP (prefetch) */
4204 case 0x18: /* Grp16 (prefetch/nop) */
4206 case 0x20: /* mov cr, reg */
4207 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
4209 case 0x21: /* mov from dr to reg */
4210 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
4212 case 0x22: /* mov reg, cr */
4213 if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
4214 emulate_gp(ctxt, 0);
4215 rc = X86EMUL_PROPAGATE_FAULT;
4218 ctxt->dst.type = OP_NONE;
4220 case 0x23: /* mov from reg to dr */
4221 if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
4222 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
4223 ~0ULL : ~0U)) < 0) {
4224 /* #UD condition is already handled by the code above */
4225 emulate_gp(ctxt, 0);
4226 rc = X86EMUL_PROPAGATE_FAULT;
4230 ctxt->dst.type = OP_NONE; /* no writeback */
4234 msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
4235 | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
4236 if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
4237 emulate_gp(ctxt, 0);
4238 rc = X86EMUL_PROPAGATE_FAULT;
4241 rc = X86EMUL_CONTINUE;
4245 if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
4246 emulate_gp(ctxt, 0);
4247 rc = X86EMUL_PROPAGATE_FAULT;
4250 ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
4251 ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
4253 rc = X86EMUL_CONTINUE;
4255 case 0x40 ... 0x4f: /* cmov */
4256 ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
4257 if (!test_cc(ctxt->b, ctxt->eflags))
4258 ctxt->dst.type = OP_NONE; /* no writeback */
4260 case 0x80 ... 0x8f: /* jnz rel, etc*/
4261 if (test_cc(ctxt->b, ctxt->eflags))
4262 rc = jmp_rel(ctxt, ctxt->src.val);
4264 case 0x90 ... 0x9f: /* setcc r/m8 */
4265 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
4269 ctxt->dst.type = OP_NONE;
4270 /* only subword offset */
4271 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
4272 emulate_2op_SrcV_nobyte(ctxt, "bt");
4274 case 0xa4: /* shld imm8, r, r/m */
4275 case 0xa5: /* shld cl, r, r/m */
4276 emulate_2op_cl(ctxt, "shld");
4280 emulate_2op_SrcV_nobyte(ctxt, "bts");
4282 case 0xac: /* shrd imm8, r, r/m */
4283 case 0xad: /* shrd cl, r, r/m */
4284 emulate_2op_cl(ctxt, "shrd");
4286 case 0xae: /* clflush */
4288 case 0xb0 ... 0xb1: /* cmpxchg */
4290 * Save real source value, then compare EAX against destination.
4293 ctxt->src.orig_val = ctxt->src.val;
4294 ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
4295 emulate_2op_SrcV(ctxt, "cmp");
4296 if (ctxt->eflags & EFLG_ZF) {
4297 /* Success: write back to memory. */
4298 ctxt->dst.val = ctxt->src.orig_val;
4300 /* Failure: write the value we saw to EAX. */
4301 ctxt->dst.type = OP_REG;
4302 ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
4307 emulate_2op_SrcV_nobyte(ctxt, "btr");
4309 case 0xb6 ... 0xb7: /* movzx */
4310 ctxt->dst.bytes = ctxt->op_bytes;
4311 ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
4312 : (u16) ctxt->src.val;
4314 case 0xba: /* Grp8 */
4315 switch (ctxt->modrm_reg & 3) {
4328 emulate_2op_SrcV_nobyte(ctxt, "btc");
4330 case 0xbc: { /* bsf */
4332 __asm__ ("bsf %2, %0; setz %1"
4333 : "=r"(ctxt->dst.val), "=q"(zf)
4334 : "r"(ctxt->src.val));
4335 ctxt->eflags &= ~X86_EFLAGS_ZF;
4337 ctxt->eflags |= X86_EFLAGS_ZF;
4338 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4342 case 0xbd: { /* bsr */
4344 __asm__ ("bsr %2, %0; setz %1"
4345 : "=r"(ctxt->dst.val), "=q"(zf)
4346 : "r"(ctxt->src.val));
4347 ctxt->eflags &= ~X86_EFLAGS_ZF;
4349 ctxt->eflags |= X86_EFLAGS_ZF;
4350 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4354 case 0xbe ... 0xbf: /* movsx */
4355 ctxt->dst.bytes = ctxt->op_bytes;
4356 ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
4357 (s16) ctxt->src.val;
4359 case 0xc0 ... 0xc1: /* xadd */
4360 emulate_2op_SrcV(ctxt, "add");
4361 /* Write back the register source. */
4362 ctxt->src.val = ctxt->dst.orig_val;
4363 write_register_operand(&ctxt->src);
4365 case 0xc3: /* movnti */
4366 ctxt->dst.bytes = ctxt->op_bytes;
4367 ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
4368 (u64) ctxt->src.val;
4370 case 0xc7: /* Grp9 (cmpxchg8b) */
4374 goto cannot_emulate;
4377 if (rc != X86EMUL_CONTINUE)
4383 return EMULATION_FAILED;