1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
35 #define OpImplicit 1ull /* No generic decode */
36 #define OpReg 2ull /* Register */
37 #define OpMem 3ull /* Memory */
38 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
39 #define OpDI 5ull /* ES:DI/EDI/RDI */
40 #define OpMem64 6ull /* Memory, 64-bit */
41 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
42 #define OpDX 8ull /* DX register */
43 #define OpCL 9ull /* CL register (for shifts) */
44 #define OpImmByte 10ull /* 8-bit sign extended immediate */
45 #define OpOne 11ull /* Implied 1 */
46 #define OpImm 12ull /* Sign extended immediate */
47 #define OpMem16 13ull /* Memory operand (16-bit). */
48 #define OpMem32 14ull /* Memory operand (32-bit). */
49 #define OpImmU 15ull /* Immediate operand, zero extended */
50 #define OpSI 16ull /* SI/ESI/RSI */
51 #define OpImmFAddr 17ull /* Immediate far address */
52 #define OpMemFAddr 18ull /* Far address in memory */
53 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
54 #define OpES 20ull /* ES */
55 #define OpCS 21ull /* CS */
56 #define OpSS 22ull /* SS */
57 #define OpDS 23ull /* DS */
58 #define OpFS 24ull /* FS */
59 #define OpGS 25ull /* GS */
61 #define OpBits 5 /* Width of operand field */
62 #define OpMask ((1ull << OpBits) - 1)
65 * Opcode effective-address decode tables.
66 * Note that we only emulate instructions that have at least one memory
67 * operand (excluding implicit stack references). We assume that stack
68 * references and instruction fetches will never occur in special memory
69 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
73 /* Operand sizes: 8-bit operands or specified/overridden size. */
74 #define ByteOp (1<<0) /* 8-bit operands. */
75 /* Destination operand type. */
77 #define ImplicitOps (OpImplicit << DstShift)
78 #define DstReg (OpReg << DstShift)
79 #define DstMem (OpMem << DstShift)
80 #define DstAcc (OpAcc << DstShift)
81 #define DstDI (OpDI << DstShift)
82 #define DstMem64 (OpMem64 << DstShift)
83 #define DstImmUByte (OpImmUByte << DstShift)
84 #define DstDX (OpDX << DstShift)
85 #define DstMask (OpMask << DstShift)
86 /* Source operand type. */
88 #define SrcNone (OpNone << SrcShift)
89 #define SrcReg (OpReg << SrcShift)
90 #define SrcMem (OpMem << SrcShift)
91 #define SrcMem16 (OpMem16 << SrcShift)
92 #define SrcMem32 (OpMem32 << SrcShift)
93 #define SrcImm (OpImm << SrcShift)
94 #define SrcImmByte (OpImmByte << SrcShift)
95 #define SrcOne (OpOne << SrcShift)
96 #define SrcImmUByte (OpImmUByte << SrcShift)
97 #define SrcImmU (OpImmU << SrcShift)
98 #define SrcSI (OpSI << SrcShift)
99 #define SrcImmFAddr (OpImmFAddr << SrcShift)
100 #define SrcMemFAddr (OpMemFAddr << SrcShift)
101 #define SrcAcc (OpAcc << SrcShift)
102 #define SrcImmU16 (OpImmU16 << SrcShift)
103 #define SrcDX (OpDX << SrcShift)
104 #define SrcMask (OpMask << SrcShift)
105 #define BitOp (1<<11)
106 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
107 #define String (1<<13) /* String instruction (rep capable) */
108 #define Stack (1<<14) /* Stack instruction (push/pop) */
109 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
110 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
111 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
112 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
113 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
114 #define Sse (1<<18) /* SSE Vector instruction */
115 /* Generic ModRM decode. */
116 #define ModRM (1<<19)
117 /* Destination is only written; never read. */
120 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
121 #define VendorSpecific (1<<22) /* Vendor specific instruction */
122 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
123 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
124 #define Undefined (1<<25) /* No Such Instruction */
125 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
126 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
128 /* Source 2 operand type */
129 #define Src2Shift (29)
130 #define Src2None (OpNone << Src2Shift)
131 #define Src2CL (OpCL << Src2Shift)
132 #define Src2ImmByte (OpImmByte << Src2Shift)
133 #define Src2One (OpOne << Src2Shift)
134 #define Src2Imm (OpImm << Src2Shift)
135 #define Src2ES (OpES << Src2Shift)
136 #define Src2CS (OpCS << Src2Shift)
137 #define Src2SS (OpSS << Src2Shift)
138 #define Src2DS (OpDS << Src2Shift)
139 #define Src2FS (OpFS << Src2Shift)
140 #define Src2GS (OpGS << Src2Shift)
141 #define Src2Mask (OpMask << Src2Shift)
143 #define X2(x...) x, x
144 #define X3(x...) X2(x), x
145 #define X4(x...) X2(x), X2(x)
146 #define X5(x...) X4(x), x
147 #define X6(x...) X4(x), X2(x)
148 #define X7(x...) X4(x), X3(x)
149 #define X8(x...) X4(x), X4(x)
150 #define X16(x...) X8(x), X8(x)
156 int (*execute)(struct x86_emulate_ctxt *ctxt);
157 struct opcode *group;
158 struct group_dual *gdual;
159 struct gprefix *gprefix;
161 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
165 struct opcode mod012[8];
166 struct opcode mod3[8];
170 struct opcode pfx_no;
171 struct opcode pfx_66;
172 struct opcode pfx_f2;
173 struct opcode pfx_f3;
176 /* EFLAGS bit definitions. */
177 #define EFLG_ID (1<<21)
178 #define EFLG_VIP (1<<20)
179 #define EFLG_VIF (1<<19)
180 #define EFLG_AC (1<<18)
181 #define EFLG_VM (1<<17)
182 #define EFLG_RF (1<<16)
183 #define EFLG_IOPL (3<<12)
184 #define EFLG_NT (1<<14)
185 #define EFLG_OF (1<<11)
186 #define EFLG_DF (1<<10)
187 #define EFLG_IF (1<<9)
188 #define EFLG_TF (1<<8)
189 #define EFLG_SF (1<<7)
190 #define EFLG_ZF (1<<6)
191 #define EFLG_AF (1<<4)
192 #define EFLG_PF (1<<2)
193 #define EFLG_CF (1<<0)
195 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
196 #define EFLG_RESERVED_ONE_MASK 2
199 * Instruction emulation:
200 * Most instructions are emulated directly via a fragment of inline assembly
201 * code. This allows us to save/restore EFLAGS and thus very easily pick up
202 * any modified flags.
205 #if defined(CONFIG_X86_64)
206 #define _LO32 "k" /* force 32-bit operand */
207 #define _STK "%%rsp" /* stack pointer */
208 #elif defined(__i386__)
209 #define _LO32 "" /* force 32-bit operand */
210 #define _STK "%%esp" /* stack pointer */
214 * These EFLAGS bits are restored from saved value during emulation, and
215 * any changes are written back to the saved value after emulation.
217 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
219 /* Before executing instruction: restore necessary bits in EFLAGS. */
220 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
221 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
222 "movl %"_sav",%"_LO32 _tmp"; " \
225 "movl %"_msk",%"_LO32 _tmp"; " \
226 "andl %"_LO32 _tmp",("_STK"); " \
228 "notl %"_LO32 _tmp"; " \
229 "andl %"_LO32 _tmp",("_STK"); " \
230 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
232 "orl %"_LO32 _tmp",("_STK"); " \
236 /* After executing instruction: write-back necessary bits in EFLAGS. */
237 #define _POST_EFLAGS(_sav, _msk, _tmp) \
238 /* _sav |= EFLAGS & _msk; */ \
241 "andl %"_msk",%"_LO32 _tmp"; " \
242 "orl %"_LO32 _tmp",%"_sav"; "
250 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
252 __asm__ __volatile__ ( \
253 _PRE_EFLAGS("0", "4", "2") \
254 _op _suffix " %"_x"3,%1; " \
255 _POST_EFLAGS("0", "4", "2") \
256 : "=m" ((ctxt)->eflags), \
257 "+q" (*(_dsttype*)&(ctxt)->dst.val), \
259 : _y ((ctxt)->src.val), "i" (EFLAGS_MASK)); \
263 /* Raw emulation: instruction has two explicit operands. */
264 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
266 unsigned long _tmp; \
268 switch ((ctxt)->dst.bytes) { \
270 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
273 ____emulate_2op(ctxt,_op,_lx,_ly,"l",u32); \
276 ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
281 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
283 unsigned long _tmp; \
284 switch ((ctxt)->dst.bytes) { \
286 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
289 __emulate_2op_nobyte(ctxt, _op, \
290 _wx, _wy, _lx, _ly, _qx, _qy); \
295 /* Source operand is byte-sized and may be restricted to just %cl. */
296 #define emulate_2op_SrcB(ctxt, _op) \
297 __emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")
299 /* Source operand is byte, word, long or quad sized. */
300 #define emulate_2op_SrcV(ctxt, _op) \
301 __emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")
303 /* Source operand is word, long or quad sized. */
304 #define emulate_2op_SrcV_nobyte(ctxt, _op) \
305 __emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
307 /* Instruction has three operands and one operand is stored in ECX register */
308 #define __emulate_2op_cl(ctxt, _op, _suffix, _type) \
310 unsigned long _tmp; \
311 _type _clv = (ctxt)->src2.val; \
312 _type _srcv = (ctxt)->src.val; \
313 _type _dstv = (ctxt)->dst.val; \
315 __asm__ __volatile__ ( \
316 _PRE_EFLAGS("0", "5", "2") \
317 _op _suffix " %4,%1 \n" \
318 _POST_EFLAGS("0", "5", "2") \
319 : "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
320 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
323 (ctxt)->src2.val = (unsigned long) _clv; \
324 (ctxt)->src2.val = (unsigned long) _srcv; \
325 (ctxt)->dst.val = (unsigned long) _dstv; \
328 #define emulate_2op_cl(ctxt, _op) \
330 switch ((ctxt)->dst.bytes) { \
332 __emulate_2op_cl(ctxt, _op, "w", u16); \
335 __emulate_2op_cl(ctxt, _op, "l", u32); \
338 ON64(__emulate_2op_cl(ctxt, _op, "q", ulong)); \
343 #define __emulate_1op(ctxt, _op, _suffix) \
345 unsigned long _tmp; \
347 __asm__ __volatile__ ( \
348 _PRE_EFLAGS("0", "3", "2") \
349 _op _suffix " %1; " \
350 _POST_EFLAGS("0", "3", "2") \
351 : "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
353 : "i" (EFLAGS_MASK)); \
356 /* Instruction has only one explicit operand (no source operand). */
357 #define emulate_1op(ctxt, _op) \
359 switch ((ctxt)->dst.bytes) { \
360 case 1: __emulate_1op(ctxt, _op, "b"); break; \
361 case 2: __emulate_1op(ctxt, _op, "w"); break; \
362 case 4: __emulate_1op(ctxt, _op, "l"); break; \
363 case 8: ON64(__emulate_1op(ctxt, _op, "q")); break; \
367 #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
369 unsigned long _tmp; \
370 ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX]; \
371 ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX]; \
373 __asm__ __volatile__ ( \
374 _PRE_EFLAGS("0", "5", "1") \
376 _op _suffix " %6; " \
378 _POST_EFLAGS("0", "5", "1") \
379 ".pushsection .fixup,\"ax\" \n\t" \
380 "3: movb $1, %4 \n\t" \
383 _ASM_EXTABLE(1b, 3b) \
384 : "=m" ((ctxt)->eflags), "=&r" (_tmp), \
385 "+a" (*rax), "+d" (*rdx), "+qm"(_ex) \
386 : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val), \
387 "a" (*rax), "d" (*rdx)); \
390 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
391 #define emulate_1op_rax_rdx(ctxt, _op, _ex) \
393 switch((ctxt)->src.bytes) { \
395 __emulate_1op_rax_rdx(ctxt, _op, "b", _ex); \
398 __emulate_1op_rax_rdx(ctxt, _op, "w", _ex); \
401 __emulate_1op_rax_rdx(ctxt, _op, "l", _ex); \
404 __emulate_1op_rax_rdx(ctxt, _op, "q", _ex)); \
409 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
410 enum x86_intercept intercept,
411 enum x86_intercept_stage stage)
413 struct x86_instruction_info info = {
414 .intercept = intercept,
415 .rep_prefix = ctxt->rep_prefix,
416 .modrm_mod = ctxt->modrm_mod,
417 .modrm_reg = ctxt->modrm_reg,
418 .modrm_rm = ctxt->modrm_rm,
419 .src_val = ctxt->src.val64,
420 .src_bytes = ctxt->src.bytes,
421 .dst_bytes = ctxt->dst.bytes,
422 .ad_bytes = ctxt->ad_bytes,
423 .next_rip = ctxt->eip,
426 return ctxt->ops->intercept(ctxt, &info, stage);
429 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
431 return (1UL << (ctxt->ad_bytes << 3)) - 1;
434 /* Access/update address held in a register, based on addressing mode. */
435 static inline unsigned long
436 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
438 if (ctxt->ad_bytes == sizeof(unsigned long))
441 return reg & ad_mask(ctxt);
444 static inline unsigned long
445 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
447 return address_mask(ctxt, reg);
451 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
453 if (ctxt->ad_bytes == sizeof(unsigned long))
456 *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
459 static u32 desc_limit_scaled(struct desc_struct *desc)
461 u32 limit = get_desc_limit(desc);
463 return desc->g ? (limit << 12) | 0xfff : limit;
466 static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
468 ctxt->has_seg_override = true;
469 ctxt->seg_override = seg;
472 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
474 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
477 return ctxt->ops->get_cached_segment_base(ctxt, seg);
480 static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
482 if (!ctxt->has_seg_override)
485 return ctxt->seg_override;
488 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
489 u32 error, bool valid)
491 ctxt->exception.vector = vec;
492 ctxt->exception.error_code = error;
493 ctxt->exception.error_code_valid = valid;
494 return X86EMUL_PROPAGATE_FAULT;
497 static int emulate_db(struct x86_emulate_ctxt *ctxt)
499 return emulate_exception(ctxt, DB_VECTOR, 0, false);
502 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
504 return emulate_exception(ctxt, GP_VECTOR, err, true);
507 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
509 return emulate_exception(ctxt, SS_VECTOR, err, true);
512 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
514 return emulate_exception(ctxt, UD_VECTOR, 0, false);
517 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
519 return emulate_exception(ctxt, TS_VECTOR, err, true);
522 static int emulate_de(struct x86_emulate_ctxt *ctxt)
524 return emulate_exception(ctxt, DE_VECTOR, 0, false);
527 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
529 return emulate_exception(ctxt, NM_VECTOR, 0, false);
532 static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
535 switch (ctxt->op_bytes) {
537 ctxt->_eip = (u16)dst;
540 ctxt->_eip = (u32)dst;
544 if ((cs_l && is_noncanonical_address(dst)) ||
545 (!cs_l && (dst >> 32) != 0))
546 return emulate_gp(ctxt, 0);
551 WARN(1, "unsupported eip assignment size\n");
553 return X86EMUL_CONTINUE;
556 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
558 return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
561 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
563 return assign_eip_near(ctxt, ctxt->_eip + rel);
566 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
569 struct desc_struct desc;
571 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
575 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
580 struct desc_struct desc;
582 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
583 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
586 static int __linearize(struct x86_emulate_ctxt *ctxt,
587 struct segmented_address addr,
588 unsigned size, bool write, bool fetch,
591 struct desc_struct desc;
598 la = seg_base(ctxt, addr.seg) + addr.ea;
599 switch (ctxt->mode) {
600 case X86EMUL_MODE_REAL:
602 case X86EMUL_MODE_PROT64:
603 if (((signed long)la << 16) >> 16 != la)
604 return emulate_gp(ctxt, 0);
607 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
611 /* code segment or read-only data segment */
612 if (((desc.type & 8) || !(desc.type & 2)) && write)
614 /* unreadable code segment */
615 if (!fetch && (desc.type & 8) && !(desc.type & 2))
617 lim = desc_limit_scaled(&desc);
618 if ((desc.type & 8) || !(desc.type & 4)) {
619 /* expand-up segment */
620 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
623 /* exapand-down segment */
624 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
626 lim = desc.d ? 0xffffffff : 0xffff;
627 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
630 cpl = ctxt->ops->cpl(ctxt);
633 if (!(desc.type & 8)) {
637 } else if ((desc.type & 8) && !(desc.type & 4)) {
638 /* nonconforming code segment */
641 } else if ((desc.type & 8) && (desc.type & 4)) {
642 /* conforming code segment */
648 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
651 return X86EMUL_CONTINUE;
653 if (addr.seg == VCPU_SREG_SS)
654 return emulate_ss(ctxt, addr.seg);
656 return emulate_gp(ctxt, addr.seg);
659 static int linearize(struct x86_emulate_ctxt *ctxt,
660 struct segmented_address addr,
661 unsigned size, bool write,
664 return __linearize(ctxt, addr, size, write, false, linear);
668 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
669 struct segmented_address addr,
676 rc = linearize(ctxt, addr, size, false, &linear);
677 if (rc != X86EMUL_CONTINUE)
679 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
683 * Fetch the next byte of the instruction being emulated which is pointed to
684 * by ctxt->_eip, then increment ctxt->_eip.
686 * Also prefetch the remaining bytes of the instruction without crossing page
687 * boundary if they are not in fetch_cache yet.
689 static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
691 struct fetch_cache *fc = &ctxt->fetch;
695 if (ctxt->_eip == fc->end) {
696 unsigned long linear;
697 struct segmented_address addr = { .seg = VCPU_SREG_CS,
699 cur_size = fc->end - fc->start;
700 size = min(15UL - cur_size,
701 PAGE_SIZE - offset_in_page(ctxt->_eip));
702 rc = __linearize(ctxt, addr, size, false, true, &linear);
703 if (unlikely(rc != X86EMUL_CONTINUE))
705 rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
706 size, &ctxt->exception);
707 if (unlikely(rc != X86EMUL_CONTINUE))
711 *dest = fc->data[ctxt->_eip - fc->start];
713 return X86EMUL_CONTINUE;
716 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
717 void *dest, unsigned size)
721 /* x86 instructions are limited to 15 bytes. */
722 if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
723 return X86EMUL_UNHANDLEABLE;
725 rc = do_insn_fetch_byte(ctxt, dest++);
726 if (rc != X86EMUL_CONTINUE)
729 return X86EMUL_CONTINUE;
732 /* Fetch next part of the instruction being emulated. */
733 #define insn_fetch(_type, _ctxt) \
734 ({ unsigned long _x; \
735 rc = do_insn_fetch(_ctxt, &_x, sizeof(_type)); \
736 if (rc != X86EMUL_CONTINUE) \
741 #define insn_fetch_arr(_arr, _size, _ctxt) \
742 ({ rc = do_insn_fetch(_ctxt, _arr, (_size)); \
743 if (rc != X86EMUL_CONTINUE) \
748 * Given the 'reg' portion of a ModRM byte, and a register block, return a
749 * pointer into the block that addresses the relevant register.
750 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
752 static void *decode_register(u8 modrm_reg, unsigned long *regs,
757 p = ®s[modrm_reg];
758 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
759 p = (unsigned char *)®s[modrm_reg & 3] + 1;
763 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
764 struct segmented_address addr,
765 u16 *size, unsigned long *address, int op_bytes)
772 rc = segmented_read_std(ctxt, addr, size, 2);
773 if (rc != X86EMUL_CONTINUE)
776 rc = segmented_read_std(ctxt, addr, address, op_bytes);
780 static int test_cc(unsigned int condition, unsigned int flags)
784 switch ((condition & 15) >> 1) {
786 rc |= (flags & EFLG_OF);
788 case 1: /* b/c/nae */
789 rc |= (flags & EFLG_CF);
792 rc |= (flags & EFLG_ZF);
795 rc |= (flags & (EFLG_CF|EFLG_ZF));
798 rc |= (flags & EFLG_SF);
801 rc |= (flags & EFLG_PF);
804 rc |= (flags & EFLG_ZF);
807 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
811 /* Odd condition identifiers (lsb == 1) have inverted sense. */
812 return (!!rc ^ (condition & 1));
815 static void fetch_register_operand(struct operand *op)
819 op->val = *(u8 *)op->addr.reg;
822 op->val = *(u16 *)op->addr.reg;
825 op->val = *(u32 *)op->addr.reg;
828 op->val = *(u64 *)op->addr.reg;
833 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
835 ctxt->ops->get_fpu(ctxt);
837 case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
838 case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
839 case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
840 case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
841 case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
842 case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
843 case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
844 case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
846 case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
847 case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
848 case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
849 case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
850 case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
851 case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
852 case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
853 case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
857 ctxt->ops->put_fpu(ctxt);
860 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
863 ctxt->ops->get_fpu(ctxt);
865 case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
866 case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
867 case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
868 case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
869 case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
870 case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
871 case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
872 case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
874 case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
875 case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
876 case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
877 case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
878 case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
879 case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
880 case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
881 case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
885 ctxt->ops->put_fpu(ctxt);
888 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
892 unsigned reg = ctxt->modrm_reg;
893 int highbyte_regs = ctxt->rex_prefix == 0;
895 if (!(ctxt->d & ModRM))
896 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
902 read_sse_reg(ctxt, &op->vec_val, reg);
907 if ((ctxt->d & ByteOp) && !inhibit_bytereg) {
908 op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
911 op->addr.reg = decode_register(reg, ctxt->regs, 0);
912 op->bytes = ctxt->op_bytes;
914 fetch_register_operand(op);
915 op->orig_val = op->val;
918 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
922 int index_reg = 0, base_reg = 0, scale;
923 int rc = X86EMUL_CONTINUE;
926 if (ctxt->rex_prefix) {
927 ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1; /* REX.R */
928 index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
929 ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */
932 ctxt->modrm = insn_fetch(u8, ctxt);
933 ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
934 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
935 ctxt->modrm_rm |= (ctxt->modrm & 0x07);
936 ctxt->modrm_seg = VCPU_SREG_DS;
938 if (ctxt->modrm_mod == 3) {
940 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
941 op->addr.reg = decode_register(ctxt->modrm_rm,
942 ctxt->regs, ctxt->d & ByteOp);
946 op->addr.xmm = ctxt->modrm_rm;
947 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
950 fetch_register_operand(op);
956 if (ctxt->ad_bytes == 2) {
957 unsigned bx = ctxt->regs[VCPU_REGS_RBX];
958 unsigned bp = ctxt->regs[VCPU_REGS_RBP];
959 unsigned si = ctxt->regs[VCPU_REGS_RSI];
960 unsigned di = ctxt->regs[VCPU_REGS_RDI];
962 /* 16-bit ModR/M decode. */
963 switch (ctxt->modrm_mod) {
965 if (ctxt->modrm_rm == 6)
966 modrm_ea += insn_fetch(u16, ctxt);
969 modrm_ea += insn_fetch(s8, ctxt);
972 modrm_ea += insn_fetch(u16, ctxt);
975 switch (ctxt->modrm_rm) {
995 if (ctxt->modrm_mod != 0)
1002 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1003 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1004 ctxt->modrm_seg = VCPU_SREG_SS;
1005 modrm_ea = (u16)modrm_ea;
1007 /* 32/64-bit ModR/M decode. */
1008 if ((ctxt->modrm_rm & 7) == 4) {
1009 sib = insn_fetch(u8, ctxt);
1010 index_reg |= (sib >> 3) & 7;
1011 base_reg |= sib & 7;
1014 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1015 modrm_ea += insn_fetch(s32, ctxt);
1017 modrm_ea += ctxt->regs[base_reg];
1019 modrm_ea += ctxt->regs[index_reg] << scale;
1020 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1021 if (ctxt->mode == X86EMUL_MODE_PROT64)
1022 ctxt->rip_relative = 1;
1024 modrm_ea += ctxt->regs[ctxt->modrm_rm];
1025 switch (ctxt->modrm_mod) {
1027 if (ctxt->modrm_rm == 5)
1028 modrm_ea += insn_fetch(s32, ctxt);
1031 modrm_ea += insn_fetch(s8, ctxt);
1034 modrm_ea += insn_fetch(s32, ctxt);
1038 op->addr.mem.ea = modrm_ea;
1043 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1046 int rc = X86EMUL_CONTINUE;
1049 switch (ctxt->ad_bytes) {
1051 op->addr.mem.ea = insn_fetch(u16, ctxt);
1054 op->addr.mem.ea = insn_fetch(u32, ctxt);
1057 op->addr.mem.ea = insn_fetch(u64, ctxt);
1064 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1068 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1069 mask = ~(ctxt->dst.bytes * 8 - 1);
1071 if (ctxt->src.bytes == 2)
1072 sv = (s16)ctxt->src.val & (s16)mask;
1073 else if (ctxt->src.bytes == 4)
1074 sv = (s32)ctxt->src.val & (s32)mask;
1076 ctxt->dst.addr.mem.ea += (sv >> 3);
1079 /* only subword offset */
1080 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1083 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1084 unsigned long addr, void *dest, unsigned size)
1087 struct read_cache *mc = &ctxt->mem_read;
1090 int n = min(size, 8u);
1092 if (mc->pos < mc->end)
1095 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
1097 if (rc != X86EMUL_CONTINUE)
1102 memcpy(dest, mc->data + mc->pos, n);
1107 return X86EMUL_CONTINUE;
1110 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1111 struct segmented_address addr,
1118 rc = linearize(ctxt, addr, size, false, &linear);
1119 if (rc != X86EMUL_CONTINUE)
1121 return read_emulated(ctxt, linear, data, size);
1124 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1125 struct segmented_address addr,
1132 rc = linearize(ctxt, addr, size, true, &linear);
1133 if (rc != X86EMUL_CONTINUE)
1135 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1139 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1140 struct segmented_address addr,
1141 const void *orig_data, const void *data,
1147 rc = linearize(ctxt, addr, size, true, &linear);
1148 if (rc != X86EMUL_CONTINUE)
1150 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1151 size, &ctxt->exception);
1154 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1155 unsigned int size, unsigned short port,
1158 struct read_cache *rc = &ctxt->io_read;
1160 if (rc->pos == rc->end) { /* refill pio read ahead */
1161 unsigned int in_page, n;
1162 unsigned int count = ctxt->rep_prefix ?
1163 address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
1164 in_page = (ctxt->eflags & EFLG_DF) ?
1165 offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
1166 PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
1167 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1171 rc->pos = rc->end = 0;
1172 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1177 memcpy(dest, rc->data + rc->pos, size);
1182 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1183 u16 selector, struct desc_ptr *dt)
1185 struct x86_emulate_ops *ops = ctxt->ops;
1187 if (selector & 1 << 2) {
1188 struct desc_struct desc;
1191 memset (dt, 0, sizeof *dt);
1192 if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
1195 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1196 dt->address = get_desc_base(&desc);
1198 ops->get_gdt(ctxt, dt);
1201 /* allowed just for 8 bytes segments */
1202 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1203 u16 selector, struct desc_struct *desc)
1206 u16 index = selector >> 3;
1209 get_descriptor_table_ptr(ctxt, selector, &dt);
1211 if (dt.size < index * 8 + 7)
1212 return emulate_gp(ctxt, selector & 0xfffc);
1214 addr = dt.address + index * 8;
1215 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1219 /* allowed just for 8 bytes segments */
1220 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1221 u16 selector, struct desc_struct *desc)
1224 u16 index = selector >> 3;
1227 get_descriptor_table_ptr(ctxt, selector, &dt);
1229 if (dt.size < index * 8 + 7)
1230 return emulate_gp(ctxt, selector & 0xfffc);
1232 addr = dt.address + index * 8;
1233 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1237 /* Does not support long mode */
1238 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1239 u16 selector, int seg, u8 cpl,
1240 struct desc_struct *desc)
1242 struct desc_struct seg_desc;
1244 unsigned err_vec = GP_VECTOR;
1246 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1249 memset(&seg_desc, 0, sizeof seg_desc);
1251 if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1252 || ctxt->mode == X86EMUL_MODE_REAL) {
1253 /* set real mode segment descriptor */
1254 set_desc_base(&seg_desc, selector << 4);
1255 set_desc_limit(&seg_desc, 0xffff);
1262 /* NULL selector is not valid for TR, CS and SS */
1263 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
1267 /* TR should be in GDT only */
1268 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1271 if (null_selector) /* for NULL selector skip all following checks */
1274 ret = read_segment_descriptor(ctxt, selector, &seg_desc);
1275 if (ret != X86EMUL_CONTINUE)
1278 err_code = selector & 0xfffc;
1279 err_vec = GP_VECTOR;
1281 /* can't load system descriptor into segment selecor */
1282 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1286 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1296 * segment is not a writable data segment or segment
1297 * selector's RPL != CPL or segment selector's RPL != CPL
1299 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1303 if (!(seg_desc.type & 8))
1306 if (seg_desc.type & 4) {
1312 if (rpl > cpl || dpl != cpl)
1315 /* CS(RPL) <- CPL */
1316 selector = (selector & 0xfffc) | cpl;
1319 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1322 case VCPU_SREG_LDTR:
1323 if (seg_desc.s || seg_desc.type != 2)
1326 default: /* DS, ES, FS, or GS */
1328 * segment is not a data or readable code segment or
1329 * ((segment is a data or nonconforming code segment)
1330 * and (both RPL and CPL > DPL))
1332 if ((seg_desc.type & 0xa) == 0x8 ||
1333 (((seg_desc.type & 0xc) != 0xc) &&
1334 (rpl > dpl && cpl > dpl)))
1340 /* mark segment as accessed */
1342 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1343 if (ret != X86EMUL_CONTINUE)
1347 ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
1350 return X86EMUL_CONTINUE;
1352 emulate_exception(ctxt, err_vec, err_code, true);
1353 return X86EMUL_PROPAGATE_FAULT;
1356 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1357 u16 selector, int seg)
1359 u8 cpl = ctxt->ops->cpl(ctxt);
1360 return __load_segment_descriptor(ctxt, selector, seg, cpl, NULL);
1363 static void write_register_operand(struct operand *op)
1365 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1366 switch (op->bytes) {
1368 *(u8 *)op->addr.reg = (u8)op->val;
1371 *(u16 *)op->addr.reg = (u16)op->val;
1374 *op->addr.reg = (u32)op->val;
1375 break; /* 64b: zero-extend */
1377 *op->addr.reg = op->val;
1382 static int writeback(struct x86_emulate_ctxt *ctxt)
1386 switch (ctxt->dst.type) {
1388 write_register_operand(&ctxt->dst);
1391 if (ctxt->lock_prefix)
1392 rc = segmented_cmpxchg(ctxt,
1394 &ctxt->dst.orig_val,
1398 rc = segmented_write(ctxt,
1402 if (rc != X86EMUL_CONTINUE)
1406 write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
1414 return X86EMUL_CONTINUE;
1417 static int em_push(struct x86_emulate_ctxt *ctxt)
1419 struct segmented_address addr;
1421 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
1422 addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1423 addr.seg = VCPU_SREG_SS;
1425 /* Disable writeback. */
1426 ctxt->dst.type = OP_NONE;
1427 return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
1430 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1431 void *dest, int len)
1434 struct segmented_address addr;
1436 addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1437 addr.seg = VCPU_SREG_SS;
1438 rc = segmented_read(ctxt, addr, dest, len);
1439 if (rc != X86EMUL_CONTINUE)
1442 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
1446 static int em_pop(struct x86_emulate_ctxt *ctxt)
1448 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1451 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1452 void *dest, int len)
1455 unsigned long val, change_mask;
1456 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1457 int cpl = ctxt->ops->cpl(ctxt);
1459 rc = emulate_pop(ctxt, &val, len);
1460 if (rc != X86EMUL_CONTINUE)
1463 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1464 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1466 switch(ctxt->mode) {
1467 case X86EMUL_MODE_PROT64:
1468 case X86EMUL_MODE_PROT32:
1469 case X86EMUL_MODE_PROT16:
1471 change_mask |= EFLG_IOPL;
1473 change_mask |= EFLG_IF;
1475 case X86EMUL_MODE_VM86:
1477 return emulate_gp(ctxt, 0);
1478 change_mask |= EFLG_IF;
1480 default: /* real mode */
1481 change_mask |= (EFLG_IOPL | EFLG_IF);
1485 *(unsigned long *)dest =
1486 (ctxt->eflags & ~change_mask) | (val & change_mask);
1491 static int em_popf(struct x86_emulate_ctxt *ctxt)
1493 ctxt->dst.type = OP_REG;
1494 ctxt->dst.addr.reg = &ctxt->eflags;
1495 ctxt->dst.bytes = ctxt->op_bytes;
1496 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1499 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1501 int seg = ctxt->src2.val;
1503 ctxt->src.val = get_segment_selector(ctxt, seg);
1505 return em_push(ctxt);
1508 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1510 int seg = ctxt->src2.val;
1511 unsigned long selector;
1514 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1515 if (rc != X86EMUL_CONTINUE)
1518 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1522 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1524 unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
1525 int rc = X86EMUL_CONTINUE;
1526 int reg = VCPU_REGS_RAX;
1528 while (reg <= VCPU_REGS_RDI) {
1529 (reg == VCPU_REGS_RSP) ?
1530 (ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);
1533 if (rc != X86EMUL_CONTINUE)
1542 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1544 ctxt->src.val = (unsigned long)ctxt->eflags;
1545 return em_push(ctxt);
1548 static int em_popa(struct x86_emulate_ctxt *ctxt)
1550 int rc = X86EMUL_CONTINUE;
1551 int reg = VCPU_REGS_RDI;
1553 while (reg >= VCPU_REGS_RAX) {
1554 if (reg == VCPU_REGS_RSP) {
1555 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
1560 rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
1561 if (rc != X86EMUL_CONTINUE)
1568 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1570 struct x86_emulate_ops *ops = ctxt->ops;
1577 /* TODO: Add limit checks */
1578 ctxt->src.val = ctxt->eflags;
1580 if (rc != X86EMUL_CONTINUE)
1583 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1585 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1587 if (rc != X86EMUL_CONTINUE)
1590 ctxt->src.val = ctxt->_eip;
1592 if (rc != X86EMUL_CONTINUE)
1595 ops->get_idt(ctxt, &dt);
1597 eip_addr = dt.address + (irq << 2);
1598 cs_addr = dt.address + (irq << 2) + 2;
1600 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1601 if (rc != X86EMUL_CONTINUE)
1604 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1605 if (rc != X86EMUL_CONTINUE)
1608 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1609 if (rc != X86EMUL_CONTINUE)
1617 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1619 switch(ctxt->mode) {
1620 case X86EMUL_MODE_REAL:
1621 return emulate_int_real(ctxt, irq);
1622 case X86EMUL_MODE_VM86:
1623 case X86EMUL_MODE_PROT16:
1624 case X86EMUL_MODE_PROT32:
1625 case X86EMUL_MODE_PROT64:
1627 /* Protected mode interrupts unimplemented yet */
1628 return X86EMUL_UNHANDLEABLE;
1632 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1634 int rc = X86EMUL_CONTINUE;
1635 unsigned long temp_eip = 0;
1636 unsigned long temp_eflags = 0;
1637 unsigned long cs = 0;
1638 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1639 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1640 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1641 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1643 /* TODO: Add stack limit check */
1645 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1647 if (rc != X86EMUL_CONTINUE)
1650 if (temp_eip & ~0xffff)
1651 return emulate_gp(ctxt, 0);
1653 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1655 if (rc != X86EMUL_CONTINUE)
1658 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1660 if (rc != X86EMUL_CONTINUE)
1663 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1665 if (rc != X86EMUL_CONTINUE)
1668 ctxt->_eip = temp_eip;
1671 if (ctxt->op_bytes == 4)
1672 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1673 else if (ctxt->op_bytes == 2) {
1674 ctxt->eflags &= ~0xffff;
1675 ctxt->eflags |= temp_eflags;
1678 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1679 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1684 static int em_iret(struct x86_emulate_ctxt *ctxt)
1686 switch(ctxt->mode) {
1687 case X86EMUL_MODE_REAL:
1688 return emulate_iret_real(ctxt);
1689 case X86EMUL_MODE_VM86:
1690 case X86EMUL_MODE_PROT16:
1691 case X86EMUL_MODE_PROT32:
1692 case X86EMUL_MODE_PROT64:
1694 /* iret from protected mode unimplemented yet */
1695 return X86EMUL_UNHANDLEABLE;
1699 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1702 unsigned short sel, old_sel;
1703 struct desc_struct old_desc, new_desc;
1704 const struct x86_emulate_ops *ops = ctxt->ops;
1705 u8 cpl = ctxt->ops->cpl(ctxt);
1707 /* Assignment of RIP may only fail in 64-bit mode */
1708 if (ctxt->mode == X86EMUL_MODE_PROT64)
1709 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
1712 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1714 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
1716 if (rc != X86EMUL_CONTINUE)
1719 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
1720 if (rc != X86EMUL_CONTINUE) {
1721 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
1722 /* assigning eip failed; restore the old cs */
1723 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
1729 static int em_grp1a(struct x86_emulate_ctxt *ctxt)
1731 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
1734 static int em_grp2(struct x86_emulate_ctxt *ctxt)
1736 switch (ctxt->modrm_reg) {
1738 emulate_2op_SrcB(ctxt, "rol");
1741 emulate_2op_SrcB(ctxt, "ror");
1744 emulate_2op_SrcB(ctxt, "rcl");
1747 emulate_2op_SrcB(ctxt, "rcr");
1749 case 4: /* sal/shl */
1750 case 6: /* sal/shl */
1751 emulate_2op_SrcB(ctxt, "sal");
1754 emulate_2op_SrcB(ctxt, "shr");
1757 emulate_2op_SrcB(ctxt, "sar");
1760 return X86EMUL_CONTINUE;
1763 static int em_not(struct x86_emulate_ctxt *ctxt)
1765 ctxt->dst.val = ~ctxt->dst.val;
1766 return X86EMUL_CONTINUE;
1769 static int em_neg(struct x86_emulate_ctxt *ctxt)
1771 emulate_1op(ctxt, "neg");
1772 return X86EMUL_CONTINUE;
1775 static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
1779 emulate_1op_rax_rdx(ctxt, "mul", ex);
1780 return X86EMUL_CONTINUE;
1783 static int em_imul_ex(struct x86_emulate_ctxt *ctxt)
1787 emulate_1op_rax_rdx(ctxt, "imul", ex);
1788 return X86EMUL_CONTINUE;
1791 static int em_div_ex(struct x86_emulate_ctxt *ctxt)
1795 emulate_1op_rax_rdx(ctxt, "div", de);
1797 return emulate_de(ctxt);
1798 return X86EMUL_CONTINUE;
1801 static int em_idiv_ex(struct x86_emulate_ctxt *ctxt)
1805 emulate_1op_rax_rdx(ctxt, "idiv", de);
1807 return emulate_de(ctxt);
1808 return X86EMUL_CONTINUE;
1811 static int em_grp45(struct x86_emulate_ctxt *ctxt)
1813 int rc = X86EMUL_CONTINUE;
1815 switch (ctxt->modrm_reg) {
1817 emulate_1op(ctxt, "inc");
1820 emulate_1op(ctxt, "dec");
1822 case 2: /* call near abs */ {
1824 old_eip = ctxt->_eip;
1825 rc = assign_eip_near(ctxt, ctxt->src.val);
1826 if (rc != X86EMUL_CONTINUE)
1828 ctxt->src.val = old_eip;
1832 case 4: /* jmp abs */
1833 rc = assign_eip_near(ctxt, ctxt->src.val);
1835 case 5: /* jmp far */
1836 rc = em_jmp_far(ctxt);
1845 static int em_grp9(struct x86_emulate_ctxt *ctxt)
1847 u64 old = ctxt->dst.orig_val64;
1849 if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
1850 ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
1851 ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1852 ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1853 ctxt->eflags &= ~EFLG_ZF;
1855 ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
1856 (u32) ctxt->regs[VCPU_REGS_RBX];
1858 ctxt->eflags |= EFLG_ZF;
1860 return X86EMUL_CONTINUE;
1863 static int em_ret(struct x86_emulate_ctxt *ctxt)
1868 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
1869 if (rc != X86EMUL_CONTINUE)
1872 return assign_eip_near(ctxt, eip);
1875 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
1878 unsigned long eip, cs;
1880 int cpl = ctxt->ops->cpl(ctxt);
1881 struct desc_struct old_desc, new_desc;
1882 const struct x86_emulate_ops *ops = ctxt->ops;
1884 if (ctxt->mode == X86EMUL_MODE_PROT64)
1885 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
1888 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
1889 if (rc != X86EMUL_CONTINUE)
1891 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1892 if (rc != X86EMUL_CONTINUE)
1894 /* Outer-privilege level return is not implemented */
1895 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
1896 return X86EMUL_UNHANDLEABLE;
1897 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0,
1899 if (rc != X86EMUL_CONTINUE)
1901 rc = assign_eip_far(ctxt, eip, new_desc.l);
1902 if (rc != X86EMUL_CONTINUE) {
1903 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
1904 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
1909 static int em_lseg(struct x86_emulate_ctxt *ctxt)
1911 int seg = ctxt->src2.val;
1915 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1917 rc = load_segment_descriptor(ctxt, sel, seg);
1918 if (rc != X86EMUL_CONTINUE)
1921 ctxt->dst.val = ctxt->src.val;
1926 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1927 struct desc_struct *cs, struct desc_struct *ss)
1931 memset(cs, 0, sizeof(struct desc_struct));
1932 ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
1933 memset(ss, 0, sizeof(struct desc_struct));
1935 cs->l = 0; /* will be adjusted later */
1936 set_desc_base(cs, 0); /* flat segment */
1937 cs->g = 1; /* 4kb granularity */
1938 set_desc_limit(cs, 0xfffff); /* 4GB limit */
1939 cs->type = 0x0b; /* Read, Execute, Accessed */
1941 cs->dpl = 0; /* will be adjusted later */
1945 set_desc_base(ss, 0); /* flat segment */
1946 set_desc_limit(ss, 0xfffff); /* 4GB limit */
1947 ss->g = 1; /* 4kb granularity */
1949 ss->type = 0x03; /* Read/Write, Accessed */
1950 ss->d = 1; /* 32bit stack segment */
1955 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
1957 struct x86_emulate_ops *ops = ctxt->ops;
1958 u32 eax, ebx, ecx, edx;
1961 * syscall should always be enabled in longmode - so only become
1962 * vendor specific (cpuid) if other modes are active...
1964 if (ctxt->mode == X86EMUL_MODE_PROT64)
1969 if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
1971 * Intel ("GenuineIntel")
1972 * remark: Intel CPUs only support "syscall" in 64bit
1973 * longmode. Also an 64bit guest with a
1974 * 32bit compat-app running will #UD !! While this
1975 * behaviour can be fixed (by emulating) into AMD
1976 * response - CPUs of AMD can't behave like Intel.
1978 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
1979 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
1980 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
1983 /* AMD ("AuthenticAMD") */
1984 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
1985 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
1986 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
1989 /* AMD ("AMDisbetter!") */
1990 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
1991 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
1992 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
1996 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2000 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2002 struct x86_emulate_ops *ops = ctxt->ops;
2003 struct desc_struct cs, ss;
2008 /* syscall is not available in real mode */
2009 if (ctxt->mode == X86EMUL_MODE_REAL ||
2010 ctxt->mode == X86EMUL_MODE_VM86)
2011 return emulate_ud(ctxt);
2013 if (!(em_syscall_is_enabled(ctxt)))
2014 return emulate_ud(ctxt);
2016 ops->get_msr(ctxt, MSR_EFER, &efer);
2017 setup_syscalls_segments(ctxt, &cs, &ss);
2019 if (!(efer & EFER_SCE))
2020 return emulate_ud(ctxt);
2022 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2024 cs_sel = (u16)(msr_data & 0xfffc);
2025 ss_sel = (u16)(msr_data + 8);
2027 if (efer & EFER_LMA) {
2031 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2032 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2034 ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
2035 if (efer & EFER_LMA) {
2036 #ifdef CONFIG_X86_64
2037 ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
2040 ctxt->mode == X86EMUL_MODE_PROT64 ?
2041 MSR_LSTAR : MSR_CSTAR, &msr_data);
2042 ctxt->_eip = msr_data;
2044 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2045 ctxt->eflags &= ~(msr_data | EFLG_RF);
2049 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2050 ctxt->_eip = (u32)msr_data;
2052 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2055 return X86EMUL_CONTINUE;
2058 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2060 struct x86_emulate_ops *ops = ctxt->ops;
2061 struct desc_struct cs, ss;
2066 ops->get_msr(ctxt, MSR_EFER, &efer);
2067 /* inject #GP if in real mode */
2068 if (ctxt->mode == X86EMUL_MODE_REAL)
2069 return emulate_gp(ctxt, 0);
2071 /* XXX sysenter/sysexit have not been tested in 64bit mode.
2072 * Therefore, we inject an #UD.
2074 if (ctxt->mode == X86EMUL_MODE_PROT64)
2075 return emulate_ud(ctxt);
2077 setup_syscalls_segments(ctxt, &cs, &ss);
2079 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2080 switch (ctxt->mode) {
2081 case X86EMUL_MODE_PROT32:
2082 if ((msr_data & 0xfffc) == 0x0)
2083 return emulate_gp(ctxt, 0);
2085 case X86EMUL_MODE_PROT64:
2086 if (msr_data == 0x0)
2087 return emulate_gp(ctxt, 0);
2091 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2092 cs_sel = (u16)msr_data;
2093 cs_sel &= ~SELECTOR_RPL_MASK;
2094 ss_sel = cs_sel + 8;
2095 ss_sel &= ~SELECTOR_RPL_MASK;
2096 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2101 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2102 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2104 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2105 ctxt->_eip = msr_data;
2107 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2108 ctxt->regs[VCPU_REGS_RSP] = msr_data;
2110 return X86EMUL_CONTINUE;
2113 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2115 struct x86_emulate_ops *ops = ctxt->ops;
2116 struct desc_struct cs, ss;
2117 u64 msr_data, rcx, rdx;
2119 u16 cs_sel = 0, ss_sel = 0;
2121 /* inject #GP if in real mode or Virtual 8086 mode */
2122 if (ctxt->mode == X86EMUL_MODE_REAL ||
2123 ctxt->mode == X86EMUL_MODE_VM86)
2124 return emulate_gp(ctxt, 0);
2126 setup_syscalls_segments(ctxt, &cs, &ss);
2128 if ((ctxt->rex_prefix & 0x8) != 0x0)
2129 usermode = X86EMUL_MODE_PROT64;
2131 usermode = X86EMUL_MODE_PROT32;
2133 rcx = ctxt->regs[VCPU_REGS_RCX];
2134 rdx = ctxt->regs[VCPU_REGS_RDX];
2138 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2140 case X86EMUL_MODE_PROT32:
2141 cs_sel = (u16)(msr_data + 16);
2142 if ((msr_data & 0xfffc) == 0x0)
2143 return emulate_gp(ctxt, 0);
2144 ss_sel = (u16)(msr_data + 24);
2146 case X86EMUL_MODE_PROT64:
2147 cs_sel = (u16)(msr_data + 32);
2148 if (msr_data == 0x0)
2149 return emulate_gp(ctxt, 0);
2150 ss_sel = cs_sel + 8;
2153 if (is_noncanonical_address(rcx) ||
2154 is_noncanonical_address(rdx))
2155 return emulate_gp(ctxt, 0);
2158 cs_sel |= SELECTOR_RPL_MASK;
2159 ss_sel |= SELECTOR_RPL_MASK;
2161 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2162 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2165 ctxt->regs[VCPU_REGS_RSP] = rcx;
2167 return X86EMUL_CONTINUE;
2170 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2173 if (ctxt->mode == X86EMUL_MODE_REAL)
2175 if (ctxt->mode == X86EMUL_MODE_VM86)
2177 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2178 return ctxt->ops->cpl(ctxt) > iopl;
2181 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2184 struct x86_emulate_ops *ops = ctxt->ops;
2185 struct desc_struct tr_seg;
2188 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2189 unsigned mask = (1 << len) - 1;
2192 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2195 if (desc_limit_scaled(&tr_seg) < 103)
2197 base = get_desc_base(&tr_seg);
2198 #ifdef CONFIG_X86_64
2199 base |= ((u64)base3) << 32;
2201 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2202 if (r != X86EMUL_CONTINUE)
2204 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2206 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2207 if (r != X86EMUL_CONTINUE)
2209 if ((perm >> bit_idx) & mask)
2214 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2220 if (emulator_bad_iopl(ctxt))
2221 if (!emulator_io_port_access_allowed(ctxt, port, len))
2224 ctxt->perm_ok = true;
2229 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2230 struct tss_segment_16 *tss)
2232 tss->ip = ctxt->_eip;
2233 tss->flag = ctxt->eflags;
2234 tss->ax = ctxt->regs[VCPU_REGS_RAX];
2235 tss->cx = ctxt->regs[VCPU_REGS_RCX];
2236 tss->dx = ctxt->regs[VCPU_REGS_RDX];
2237 tss->bx = ctxt->regs[VCPU_REGS_RBX];
2238 tss->sp = ctxt->regs[VCPU_REGS_RSP];
2239 tss->bp = ctxt->regs[VCPU_REGS_RBP];
2240 tss->si = ctxt->regs[VCPU_REGS_RSI];
2241 tss->di = ctxt->regs[VCPU_REGS_RDI];
2243 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2244 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2245 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2246 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2247 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2250 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2251 struct tss_segment_16 *tss)
2256 ctxt->_eip = tss->ip;
2257 ctxt->eflags = tss->flag | 2;
2258 ctxt->regs[VCPU_REGS_RAX] = tss->ax;
2259 ctxt->regs[VCPU_REGS_RCX] = tss->cx;
2260 ctxt->regs[VCPU_REGS_RDX] = tss->dx;
2261 ctxt->regs[VCPU_REGS_RBX] = tss->bx;
2262 ctxt->regs[VCPU_REGS_RSP] = tss->sp;
2263 ctxt->regs[VCPU_REGS_RBP] = tss->bp;
2264 ctxt->regs[VCPU_REGS_RSI] = tss->si;
2265 ctxt->regs[VCPU_REGS_RDI] = tss->di;
2268 * SDM says that segment selectors are loaded before segment
2271 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2272 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2273 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2274 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2275 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2280 * Now load segment descriptors. If fault happenes at this stage
2281 * it is handled in a context of new task
2283 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2285 if (ret != X86EMUL_CONTINUE)
2287 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2289 if (ret != X86EMUL_CONTINUE)
2291 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2293 if (ret != X86EMUL_CONTINUE)
2295 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2297 if (ret != X86EMUL_CONTINUE)
2299 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2301 if (ret != X86EMUL_CONTINUE)
2304 return X86EMUL_CONTINUE;
2307 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2308 u16 tss_selector, u16 old_tss_sel,
2309 ulong old_tss_base, struct desc_struct *new_desc)
2311 struct x86_emulate_ops *ops = ctxt->ops;
2312 struct tss_segment_16 tss_seg;
2314 u32 new_tss_base = get_desc_base(new_desc);
2316 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2318 if (ret != X86EMUL_CONTINUE)
2319 /* FIXME: need to provide precise fault address */
2322 save_state_to_tss16(ctxt, &tss_seg);
2324 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2326 if (ret != X86EMUL_CONTINUE)
2327 /* FIXME: need to provide precise fault address */
2330 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2332 if (ret != X86EMUL_CONTINUE)
2333 /* FIXME: need to provide precise fault address */
2336 if (old_tss_sel != 0xffff) {
2337 tss_seg.prev_task_link = old_tss_sel;
2339 ret = ops->write_std(ctxt, new_tss_base,
2340 &tss_seg.prev_task_link,
2341 sizeof tss_seg.prev_task_link,
2343 if (ret != X86EMUL_CONTINUE)
2344 /* FIXME: need to provide precise fault address */
2348 return load_state_from_tss16(ctxt, &tss_seg);
2351 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2352 struct tss_segment_32 *tss)
2354 tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
2355 tss->eip = ctxt->_eip;
2356 tss->eflags = ctxt->eflags;
2357 tss->eax = ctxt->regs[VCPU_REGS_RAX];
2358 tss->ecx = ctxt->regs[VCPU_REGS_RCX];
2359 tss->edx = ctxt->regs[VCPU_REGS_RDX];
2360 tss->ebx = ctxt->regs[VCPU_REGS_RBX];
2361 tss->esp = ctxt->regs[VCPU_REGS_RSP];
2362 tss->ebp = ctxt->regs[VCPU_REGS_RBP];
2363 tss->esi = ctxt->regs[VCPU_REGS_RSI];
2364 tss->edi = ctxt->regs[VCPU_REGS_RDI];
2366 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2367 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2368 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2369 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2370 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2371 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2372 tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2375 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2376 struct tss_segment_32 *tss)
2381 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2382 return emulate_gp(ctxt, 0);
2383 ctxt->_eip = tss->eip;
2384 ctxt->eflags = tss->eflags | 2;
2385 ctxt->regs[VCPU_REGS_RAX] = tss->eax;
2386 ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
2387 ctxt->regs[VCPU_REGS_RDX] = tss->edx;
2388 ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
2389 ctxt->regs[VCPU_REGS_RSP] = tss->esp;
2390 ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
2391 ctxt->regs[VCPU_REGS_RSI] = tss->esi;
2392 ctxt->regs[VCPU_REGS_RDI] = tss->edi;
2395 * SDM says that segment selectors are loaded before segment
2396 * descriptors. This is important because CPL checks will
2399 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2400 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2401 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2402 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2403 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2404 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2405 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2410 * Now load segment descriptors. If fault happenes at this stage
2411 * it is handled in a context of new task
2413 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2415 if (ret != X86EMUL_CONTINUE)
2417 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2419 if (ret != X86EMUL_CONTINUE)
2421 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2423 if (ret != X86EMUL_CONTINUE)
2425 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2427 if (ret != X86EMUL_CONTINUE)
2429 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2431 if (ret != X86EMUL_CONTINUE)
2433 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2435 if (ret != X86EMUL_CONTINUE)
2437 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2439 if (ret != X86EMUL_CONTINUE)
2442 return X86EMUL_CONTINUE;
2445 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2446 u16 tss_selector, u16 old_tss_sel,
2447 ulong old_tss_base, struct desc_struct *new_desc)
2449 struct x86_emulate_ops *ops = ctxt->ops;
2450 struct tss_segment_32 tss_seg;
2452 u32 new_tss_base = get_desc_base(new_desc);
2454 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2456 if (ret != X86EMUL_CONTINUE)
2457 /* FIXME: need to provide precise fault address */
2460 save_state_to_tss32(ctxt, &tss_seg);
2462 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2464 if (ret != X86EMUL_CONTINUE)
2465 /* FIXME: need to provide precise fault address */
2468 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2470 if (ret != X86EMUL_CONTINUE)
2471 /* FIXME: need to provide precise fault address */
2474 if (old_tss_sel != 0xffff) {
2475 tss_seg.prev_task_link = old_tss_sel;
2477 ret = ops->write_std(ctxt, new_tss_base,
2478 &tss_seg.prev_task_link,
2479 sizeof tss_seg.prev_task_link,
2481 if (ret != X86EMUL_CONTINUE)
2482 /* FIXME: need to provide precise fault address */
2486 return load_state_from_tss32(ctxt, &tss_seg);
2489 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2490 u16 tss_selector, int reason,
2491 bool has_error_code, u32 error_code)
2493 struct x86_emulate_ops *ops = ctxt->ops;
2494 struct desc_struct curr_tss_desc, next_tss_desc;
2496 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2497 ulong old_tss_base =
2498 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2501 /* FIXME: old_tss_base == ~0 ? */
2503 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2504 if (ret != X86EMUL_CONTINUE)
2506 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2507 if (ret != X86EMUL_CONTINUE)
2510 /* FIXME: check that next_tss_desc is tss */
2512 if (reason != TASK_SWITCH_IRET) {
2513 if ((tss_selector & 3) > next_tss_desc.dpl ||
2514 ops->cpl(ctxt) > next_tss_desc.dpl)
2515 return emulate_gp(ctxt, 0);
2518 desc_limit = desc_limit_scaled(&next_tss_desc);
2519 if (!next_tss_desc.p ||
2520 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2521 desc_limit < 0x2b)) {
2522 emulate_ts(ctxt, tss_selector & 0xfffc);
2523 return X86EMUL_PROPAGATE_FAULT;
2526 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2527 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2528 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2531 if (reason == TASK_SWITCH_IRET)
2532 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2534 /* set back link to prev task only if NT bit is set in eflags
2535 note that old_tss_sel is not used afetr this point */
2536 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2537 old_tss_sel = 0xffff;
2539 if (next_tss_desc.type & 8)
2540 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2541 old_tss_base, &next_tss_desc);
2543 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2544 old_tss_base, &next_tss_desc);
2545 if (ret != X86EMUL_CONTINUE)
2548 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2549 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2551 if (reason != TASK_SWITCH_IRET) {
2552 next_tss_desc.type |= (1 << 1); /* set busy flag */
2553 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2556 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2557 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2559 if (has_error_code) {
2560 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2561 ctxt->lock_prefix = 0;
2562 ctxt->src.val = (unsigned long) error_code;
2563 ret = em_push(ctxt);
2569 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2570 u16 tss_selector, int reason,
2571 bool has_error_code, u32 error_code)
2575 ctxt->_eip = ctxt->eip;
2576 ctxt->dst.type = OP_NONE;
2578 rc = emulator_do_task_switch(ctxt, tss_selector, reason,
2579 has_error_code, error_code);
2581 if (rc == X86EMUL_CONTINUE)
2582 ctxt->eip = ctxt->_eip;
2584 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2587 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2588 int reg, struct operand *op)
2590 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2592 register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
2593 op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
2594 op->addr.mem.seg = seg;
2597 static int em_das(struct x86_emulate_ctxt *ctxt)
2600 bool af, cf, old_cf;
2602 cf = ctxt->eflags & X86_EFLAGS_CF;
2608 af = ctxt->eflags & X86_EFLAGS_AF;
2609 if ((al & 0x0f) > 9 || af) {
2611 cf = old_cf | (al >= 250);
2616 if (old_al > 0x99 || old_cf) {
2622 /* Set PF, ZF, SF */
2623 ctxt->src.type = OP_IMM;
2625 ctxt->src.bytes = 1;
2626 emulate_2op_SrcV(ctxt, "or");
2627 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2629 ctxt->eflags |= X86_EFLAGS_CF;
2631 ctxt->eflags |= X86_EFLAGS_AF;
2632 return X86EMUL_CONTINUE;
2635 static int em_call(struct x86_emulate_ctxt *ctxt)
2638 long rel = ctxt->src.val;
2640 ctxt->src.val = (unsigned long)ctxt->_eip;
2641 rc = jmp_rel(ctxt, rel);
2642 if (rc != X86EMUL_CONTINUE)
2644 return em_push(ctxt);
2647 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2652 struct desc_struct old_desc, new_desc;
2653 const struct x86_emulate_ops *ops = ctxt->ops;
2654 int cpl = ctxt->ops->cpl(ctxt);
2656 old_eip = ctxt->_eip;
2657 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
2659 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2660 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2662 if (rc != X86EMUL_CONTINUE)
2663 return X86EMUL_CONTINUE;
2665 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2666 if (rc != X86EMUL_CONTINUE)
2669 ctxt->src.val = old_cs;
2671 if (rc != X86EMUL_CONTINUE)
2674 ctxt->src.val = old_eip;
2676 /* If we failed, we tainted the memory, but the very least we should
2678 if (rc != X86EMUL_CONTINUE)
2682 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2687 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2692 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2693 if (rc != X86EMUL_CONTINUE)
2695 rc = assign_eip_near(ctxt, eip);
2696 if (rc != X86EMUL_CONTINUE)
2698 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
2699 return X86EMUL_CONTINUE;
2702 static int em_add(struct x86_emulate_ctxt *ctxt)
2704 emulate_2op_SrcV(ctxt, "add");
2705 return X86EMUL_CONTINUE;
2708 static int em_or(struct x86_emulate_ctxt *ctxt)
2710 emulate_2op_SrcV(ctxt, "or");
2711 return X86EMUL_CONTINUE;
2714 static int em_adc(struct x86_emulate_ctxt *ctxt)
2716 emulate_2op_SrcV(ctxt, "adc");
2717 return X86EMUL_CONTINUE;
2720 static int em_sbb(struct x86_emulate_ctxt *ctxt)
2722 emulate_2op_SrcV(ctxt, "sbb");
2723 return X86EMUL_CONTINUE;
2726 static int em_and(struct x86_emulate_ctxt *ctxt)
2728 emulate_2op_SrcV(ctxt, "and");
2729 return X86EMUL_CONTINUE;
2732 static int em_sub(struct x86_emulate_ctxt *ctxt)
2734 emulate_2op_SrcV(ctxt, "sub");
2735 return X86EMUL_CONTINUE;
2738 static int em_xor(struct x86_emulate_ctxt *ctxt)
2740 emulate_2op_SrcV(ctxt, "xor");
2741 return X86EMUL_CONTINUE;
2744 static int em_cmp(struct x86_emulate_ctxt *ctxt)
2746 emulate_2op_SrcV(ctxt, "cmp");
2747 /* Disable writeback. */
2748 ctxt->dst.type = OP_NONE;
2749 return X86EMUL_CONTINUE;
2752 static int em_test(struct x86_emulate_ctxt *ctxt)
2754 emulate_2op_SrcV(ctxt, "test");
2755 /* Disable writeback. */
2756 ctxt->dst.type = OP_NONE;
2757 return X86EMUL_CONTINUE;
2760 static int em_xchg(struct x86_emulate_ctxt *ctxt)
2762 /* Write back the register source. */
2763 ctxt->src.val = ctxt->dst.val;
2764 write_register_operand(&ctxt->src);
2766 /* Write back the memory destination with implicit LOCK prefix. */
2767 ctxt->dst.val = ctxt->src.orig_val;
2768 ctxt->lock_prefix = 1;
2769 return X86EMUL_CONTINUE;
2772 static int em_imul(struct x86_emulate_ctxt *ctxt)
2774 emulate_2op_SrcV_nobyte(ctxt, "imul");
2775 return X86EMUL_CONTINUE;
2778 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2780 ctxt->dst.val = ctxt->src2.val;
2781 return em_imul(ctxt);
2784 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2786 ctxt->dst.type = OP_REG;
2787 ctxt->dst.bytes = ctxt->src.bytes;
2788 ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
2789 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2791 return X86EMUL_CONTINUE;
2794 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2798 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2799 ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
2800 ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
2801 return X86EMUL_CONTINUE;
2804 static int em_mov(struct x86_emulate_ctxt *ctxt)
2806 ctxt->dst.val = ctxt->src.val;
2807 return X86EMUL_CONTINUE;
2810 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
2812 if (ctxt->modrm_reg > VCPU_SREG_GS)
2813 return emulate_ud(ctxt);
2815 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
2816 return X86EMUL_CONTINUE;
2819 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
2821 u16 sel = ctxt->src.val;
2823 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
2824 return emulate_ud(ctxt);
2826 if (ctxt->modrm_reg == VCPU_SREG_SS)
2827 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2829 /* Disable writeback. */
2830 ctxt->dst.type = OP_NONE;
2831 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
2834 static int em_movdqu(struct x86_emulate_ctxt *ctxt)
2836 memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
2837 return X86EMUL_CONTINUE;
2840 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
2845 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
2846 if (rc == X86EMUL_CONTINUE)
2847 ctxt->ops->invlpg(ctxt, linear);
2848 /* Disable writeback. */
2849 ctxt->dst.type = OP_NONE;
2850 return X86EMUL_CONTINUE;
2853 static int em_clts(struct x86_emulate_ctxt *ctxt)
2857 cr0 = ctxt->ops->get_cr(ctxt, 0);
2859 ctxt->ops->set_cr(ctxt, 0, cr0);
2860 return X86EMUL_CONTINUE;
2863 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
2867 if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
2868 return X86EMUL_UNHANDLEABLE;
2870 rc = ctxt->ops->fix_hypercall(ctxt);
2871 if (rc != X86EMUL_CONTINUE)
2874 /* Let the processor re-execute the fixed hypercall */
2875 ctxt->_eip = ctxt->eip;
2876 /* Disable writeback. */
2877 ctxt->dst.type = OP_NONE;
2878 return X86EMUL_CONTINUE;
2881 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
2883 struct desc_ptr desc_ptr;
2886 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2887 &desc_ptr.size, &desc_ptr.address,
2889 if (rc != X86EMUL_CONTINUE)
2891 ctxt->ops->set_gdt(ctxt, &desc_ptr);
2892 /* Disable writeback. */
2893 ctxt->dst.type = OP_NONE;
2894 return X86EMUL_CONTINUE;
2897 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
2901 rc = ctxt->ops->fix_hypercall(ctxt);
2903 /* Disable writeback. */
2904 ctxt->dst.type = OP_NONE;
2908 static int em_lidt(struct x86_emulate_ctxt *ctxt)
2910 struct desc_ptr desc_ptr;
2913 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2914 &desc_ptr.size, &desc_ptr.address,
2916 if (rc != X86EMUL_CONTINUE)
2918 ctxt->ops->set_idt(ctxt, &desc_ptr);
2919 /* Disable writeback. */
2920 ctxt->dst.type = OP_NONE;
2921 return X86EMUL_CONTINUE;
2924 static int em_smsw(struct x86_emulate_ctxt *ctxt)
2926 ctxt->dst.bytes = 2;
2927 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
2928 return X86EMUL_CONTINUE;
2931 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
2933 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
2934 | (ctxt->src.val & 0x0f));
2935 ctxt->dst.type = OP_NONE;
2936 return X86EMUL_CONTINUE;
2939 static int em_loop(struct x86_emulate_ctxt *ctxt)
2941 int rc = X86EMUL_CONTINUE;
2943 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
2944 if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
2945 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
2946 rc = jmp_rel(ctxt, ctxt->src.val);
2951 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
2953 int rc = X86EMUL_CONTINUE;
2955 if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
2956 rc = jmp_rel(ctxt, ctxt->src.val);
2961 static int em_cli(struct x86_emulate_ctxt *ctxt)
2963 if (emulator_bad_iopl(ctxt))
2964 return emulate_gp(ctxt, 0);
2966 ctxt->eflags &= ~X86_EFLAGS_IF;
2967 return X86EMUL_CONTINUE;
2970 static int em_sti(struct x86_emulate_ctxt *ctxt)
2972 if (emulator_bad_iopl(ctxt))
2973 return emulate_gp(ctxt, 0);
2975 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
2976 ctxt->eflags |= X86_EFLAGS_IF;
2977 return X86EMUL_CONTINUE;
2980 static bool valid_cr(int nr)
2992 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
2994 if (!valid_cr(ctxt->modrm_reg))
2995 return emulate_ud(ctxt);
2997 return X86EMUL_CONTINUE;
3000 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3002 u64 new_val = ctxt->src.val64;
3003 int cr = ctxt->modrm_reg;
3006 static u64 cr_reserved_bits[] = {
3007 0xffffffff00000000ULL,
3008 0, 0, 0, /* CR3 checked later */
3015 return emulate_ud(ctxt);
3017 if (new_val & cr_reserved_bits[cr])
3018 return emulate_gp(ctxt, 0);
3023 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3024 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3025 return emulate_gp(ctxt, 0);
3027 cr4 = ctxt->ops->get_cr(ctxt, 4);
3028 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3030 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3031 !(cr4 & X86_CR4_PAE))
3032 return emulate_gp(ctxt, 0);
3039 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3040 if (efer & EFER_LMA)
3041 rsvd = CR3_L_MODE_RESERVED_BITS;
3042 else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
3043 rsvd = CR3_PAE_RESERVED_BITS;
3044 else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
3045 rsvd = CR3_NONPAE_RESERVED_BITS;
3048 return emulate_gp(ctxt, 0);
3055 cr4 = ctxt->ops->get_cr(ctxt, 4);
3056 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3058 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3059 return emulate_gp(ctxt, 0);
3065 return X86EMUL_CONTINUE;
3068 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3072 ctxt->ops->get_dr(ctxt, 7, &dr7);
3074 /* Check if DR7.Global_Enable is set */
3075 return dr7 & (1 << 13);
3078 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3080 int dr = ctxt->modrm_reg;
3084 return emulate_ud(ctxt);
3086 cr4 = ctxt->ops->get_cr(ctxt, 4);
3087 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3088 return emulate_ud(ctxt);
3090 if (check_dr7_gd(ctxt))
3091 return emulate_db(ctxt);
3093 return X86EMUL_CONTINUE;
3096 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3098 u64 new_val = ctxt->src.val64;
3099 int dr = ctxt->modrm_reg;
3101 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3102 return emulate_gp(ctxt, 0);
3104 return check_dr_read(ctxt);
3107 static int check_svme(struct x86_emulate_ctxt *ctxt)
3111 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3113 if (!(efer & EFER_SVME))
3114 return emulate_ud(ctxt);
3116 return X86EMUL_CONTINUE;
3119 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3121 u64 rax = ctxt->regs[VCPU_REGS_RAX];
3123 /* Valid physical address? */
3124 if (rax & 0xffff000000000000ULL)
3125 return emulate_gp(ctxt, 0);
3127 return check_svme(ctxt);
3130 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3132 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3134 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3135 return emulate_ud(ctxt);
3137 return X86EMUL_CONTINUE;
3140 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3142 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3143 u64 rcx = ctxt->regs[VCPU_REGS_RCX];
3145 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3147 return emulate_gp(ctxt, 0);
3149 return X86EMUL_CONTINUE;
3152 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3154 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3155 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3156 return emulate_gp(ctxt, 0);
3158 return X86EMUL_CONTINUE;
3161 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3163 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3164 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3165 return emulate_gp(ctxt, 0);
3167 return X86EMUL_CONTINUE;
3170 #define D(_y) { .flags = (_y) }
3171 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
3172 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
3173 .check_perm = (_p) }
3175 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3176 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
3177 #define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
3178 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3179 #define II(_f, _e, _i) \
3180 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
3181 #define IIP(_f, _e, _i, _p) \
3182 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
3183 .check_perm = (_p) }
3184 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3186 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3187 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3188 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3190 #define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3191 I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3192 I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3194 static struct opcode group7_rm1[] = {
3195 DI(SrcNone | ModRM | Priv, monitor),
3196 DI(SrcNone | ModRM | Priv, mwait),
3200 static struct opcode group7_rm3[] = {
3201 DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
3202 II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
3203 DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
3204 DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
3205 DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
3206 DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
3207 DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
3208 DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
3211 static struct opcode group7_rm7[] = {
3213 DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
3217 static struct opcode group1[] = {
3228 static struct opcode group1A[] = {
3229 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
3232 static struct opcode group3[] = {
3233 I(DstMem | SrcImm | ModRM, em_test),
3234 I(DstMem | SrcImm | ModRM, em_test),
3235 I(DstMem | SrcNone | ModRM | Lock, em_not),
3236 I(DstMem | SrcNone | ModRM | Lock, em_neg),
3237 I(SrcMem | ModRM, em_mul_ex),
3238 I(SrcMem | ModRM, em_imul_ex),
3239 I(SrcMem | ModRM, em_div_ex),
3240 I(SrcMem | ModRM, em_idiv_ex),
3243 static struct opcode group4[] = {
3244 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
3248 static struct opcode group5[] = {
3249 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
3250 D(SrcMem | ModRM | Stack),
3251 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
3252 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
3253 D(SrcMem | ModRM | Stack), N,
3256 static struct opcode group6[] = {
3257 DI(ModRM | Prot, sldt),
3258 DI(ModRM | Prot, str),
3259 DI(ModRM | Prot | Priv, lldt),
3260 DI(ModRM | Prot | Priv, ltr),
3264 static struct group_dual group7 = { {
3265 DI(ModRM | Mov | DstMem | Priv, sgdt),
3266 DI(ModRM | Mov | DstMem | Priv, sidt),
3267 II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
3268 II(ModRM | SrcMem | Priv, em_lidt, lidt),
3269 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3270 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
3271 II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3273 I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
3275 N, EXT(0, group7_rm3),
3276 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3277 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
3280 static struct opcode group8[] = {
3282 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
3283 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
3286 static struct group_dual group9 = { {
3287 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
3289 N, N, N, N, N, N, N, N,
3292 static struct opcode group11[] = {
3293 I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
3296 static struct gprefix pfx_0f_6f_0f_7f = {
3297 N, N, N, I(Sse, em_movdqu),
3300 static struct opcode opcode_table[256] = {
3302 I6ALU(Lock, em_add),
3303 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3304 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3307 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3310 I6ALU(Lock, em_adc),
3311 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3312 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3314 I6ALU(Lock, em_sbb),
3315 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3316 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3318 I6ALU(Lock, em_and), N, N,
3320 I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3322 I6ALU(Lock, em_xor), N, N,
3324 I6ALU(0, em_cmp), N, N,
3328 X8(I(SrcReg | Stack, em_push)),
3330 X8(I(DstReg | Stack, em_pop)),
3332 I(ImplicitOps | Stack | No64, em_pusha),
3333 I(ImplicitOps | Stack | No64, em_popa),
3334 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3337 I(SrcImm | Mov | Stack, em_push),
3338 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3339 I(SrcImmByte | Mov | Stack, em_push),
3340 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3341 D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
3342 D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
3346 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
3347 G(DstMem | SrcImm | ModRM | Group, group1),
3348 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
3349 G(DstMem | SrcImmByte | ModRM | Group, group1),
3350 I2bv(DstMem | SrcReg | ModRM, em_test),
3351 I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
3353 I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
3354 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3355 I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
3356 D(ModRM | SrcMem | NoAccess | DstReg),
3357 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3360 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3362 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3363 I(SrcImmFAddr | No64, em_call_far), N,
3364 II(ImplicitOps | Stack, em_pushf, pushf),
3365 II(ImplicitOps | Stack, em_popf, popf), N, N,
3367 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3368 I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
3369 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3370 I2bv(SrcSI | DstDI | String, em_cmp),
3372 I2bv(DstAcc | SrcImm, em_test),
3373 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3374 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3375 I2bv(SrcAcc | DstDI | String, em_cmp),
3377 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3379 X8(I(DstReg | SrcImm | Mov, em_mov)),
3381 D2bv(DstMem | SrcImmByte | ModRM),
3382 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3383 I(ImplicitOps | Stack, em_ret),
3384 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
3385 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
3386 G(ByteOp, group11), G(0, group11),
3388 N, N, N, I(ImplicitOps | Stack, em_ret_far),
3389 D(ImplicitOps), DI(SrcImmByte, intn),
3390 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3392 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
3395 N, N, N, N, N, N, N, N,
3397 X3(I(SrcImmByte, em_loop)),
3398 I(SrcImmByte, em_jcxz),
3399 D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in),
3400 D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
3402 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
3403 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
3404 D2bvIP(SrcDX | DstAcc, in, check_perm_in),
3405 D2bvIP(SrcAcc | DstDX, out, check_perm_out),
3407 N, DI(ImplicitOps, icebp), N, N,
3408 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3409 G(ByteOp, group3), G(0, group3),
3411 D(ImplicitOps), D(ImplicitOps),
3412 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
3413 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3416 static struct opcode twobyte_table[256] = {
3418 G(0, group6), GD(0, &group7), N, N,
3419 N, I(ImplicitOps | VendorSpecific, em_syscall),
3420 II(ImplicitOps | Priv, em_clts, clts), N,
3421 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3422 N, D(ImplicitOps | ModRM), N, N,
3424 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
3426 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3427 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3428 DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
3429 DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
3431 N, N, N, N, N, N, N, N,
3433 DI(ImplicitOps | Priv, wrmsr),
3434 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3435 DI(ImplicitOps | Priv, rdmsr),
3436 DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
3437 I(ImplicitOps | VendorSpecific, em_sysenter),
3438 I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
3440 N, N, N, N, N, N, N, N,
3442 X16(D(DstReg | SrcMem | ModRM | Mov)),
3444 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3449 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3454 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3458 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
3460 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
3461 DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
3462 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3463 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
3465 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
3466 DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3467 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3468 D(DstMem | SrcReg | Src2CL | ModRM),
3469 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
3471 D2bv(DstMem | SrcReg | ModRM | Lock),
3472 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
3473 D(DstMem | SrcReg | ModRM | BitOp | Lock),
3474 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
3475 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
3476 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3479 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3480 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
3481 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3483 D2bv(DstMem | SrcReg | ModRM | Lock),
3484 N, D(DstMem | SrcReg | ModRM | Mov),
3485 N, N, N, GD(0, &group9),
3486 N, N, N, N, N, N, N, N,
3488 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3490 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3492 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3508 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
3512 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3518 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3519 unsigned size, bool sign_extension)
3521 int rc = X86EMUL_CONTINUE;
3525 op->addr.mem.ea = ctxt->_eip;
3526 /* NB. Immediates are sign-extended as necessary. */
3527 switch (op->bytes) {
3529 op->val = insn_fetch(s8, ctxt);
3532 op->val = insn_fetch(s16, ctxt);
3535 op->val = insn_fetch(s32, ctxt);
3538 if (!sign_extension) {
3539 switch (op->bytes) {
3547 op->val &= 0xffffffff;
3555 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
3558 int rc = X86EMUL_CONTINUE;
3562 decode_register_operand(ctxt, op,
3564 ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7));
3567 rc = decode_imm(ctxt, op, 1, false);
3570 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3574 if ((ctxt->d & BitOp) && op == &ctxt->dst)
3575 fetch_bit_operand(ctxt);
3576 op->orig_val = op->val;
3579 ctxt->memop.bytes = 8;
3583 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3584 op->addr.reg = &ctxt->regs[VCPU_REGS_RAX];
3585 fetch_register_operand(op);
3586 op->orig_val = op->val;
3590 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3592 register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
3593 op->addr.mem.seg = VCPU_SREG_ES;
3599 op->addr.reg = &ctxt->regs[VCPU_REGS_RDX];
3600 fetch_register_operand(op);
3604 op->val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
3607 rc = decode_imm(ctxt, op, 1, true);
3614 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
3617 ctxt->memop.bytes = 2;
3620 ctxt->memop.bytes = 4;
3623 rc = decode_imm(ctxt, op, 2, false);
3626 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
3630 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3632 register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
3633 op->addr.mem.seg = seg_override(ctxt);
3638 op->addr.mem.ea = ctxt->_eip;
3639 op->bytes = ctxt->op_bytes + 2;
3640 insn_fetch_arr(op->valptr, op->bytes, ctxt);
3643 ctxt->memop.bytes = ctxt->op_bytes + 2;
3646 op->val = VCPU_SREG_ES;
3649 op->val = VCPU_SREG_CS;
3652 op->val = VCPU_SREG_SS;
3655 op->val = VCPU_SREG_DS;
3658 op->val = VCPU_SREG_FS;
3661 op->val = VCPU_SREG_GS;
3664 /* Special instructions do their own operand decoding. */
3666 op->type = OP_NONE; /* Disable writeback. */
3674 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
3676 int rc = X86EMUL_CONTINUE;
3677 int mode = ctxt->mode;
3678 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
3679 bool op_prefix = false;
3680 struct opcode opcode;
3682 ctxt->memop.type = OP_NONE;
3683 ctxt->memopp = NULL;
3684 ctxt->_eip = ctxt->eip;
3685 ctxt->fetch.start = ctxt->_eip;
3686 ctxt->fetch.end = ctxt->fetch.start + insn_len;
3688 memcpy(ctxt->fetch.data, insn, insn_len);
3691 case X86EMUL_MODE_REAL:
3692 case X86EMUL_MODE_VM86:
3693 case X86EMUL_MODE_PROT16:
3694 def_op_bytes = def_ad_bytes = 2;
3696 case X86EMUL_MODE_PROT32:
3697 def_op_bytes = def_ad_bytes = 4;
3699 #ifdef CONFIG_X86_64
3700 case X86EMUL_MODE_PROT64:
3706 return EMULATION_FAILED;
3709 ctxt->op_bytes = def_op_bytes;
3710 ctxt->ad_bytes = def_ad_bytes;
3712 /* Legacy prefixes. */
3714 switch (ctxt->b = insn_fetch(u8, ctxt)) {
3715 case 0x66: /* operand-size override */
3717 /* switch between 2/4 bytes */
3718 ctxt->op_bytes = def_op_bytes ^ 6;
3720 case 0x67: /* address-size override */
3721 if (mode == X86EMUL_MODE_PROT64)
3722 /* switch between 4/8 bytes */
3723 ctxt->ad_bytes = def_ad_bytes ^ 12;
3725 /* switch between 2/4 bytes */
3726 ctxt->ad_bytes = def_ad_bytes ^ 6;
3728 case 0x26: /* ES override */
3729 case 0x2e: /* CS override */
3730 case 0x36: /* SS override */
3731 case 0x3e: /* DS override */
3732 set_seg_override(ctxt, (ctxt->b >> 3) & 3);
3734 case 0x64: /* FS override */
3735 case 0x65: /* GS override */
3736 set_seg_override(ctxt, ctxt->b & 7);
3738 case 0x40 ... 0x4f: /* REX */
3739 if (mode != X86EMUL_MODE_PROT64)
3741 ctxt->rex_prefix = ctxt->b;
3743 case 0xf0: /* LOCK */
3744 ctxt->lock_prefix = 1;
3746 case 0xf2: /* REPNE/REPNZ */
3747 case 0xf3: /* REP/REPE/REPZ */
3748 ctxt->rep_prefix = ctxt->b;
3754 /* Any legacy prefix after a REX prefix nullifies its effect. */
3756 ctxt->rex_prefix = 0;
3762 if (ctxt->rex_prefix & 8)
3763 ctxt->op_bytes = 8; /* REX.W */
3765 /* Opcode byte(s). */
3766 opcode = opcode_table[ctxt->b];
3767 /* Two-byte opcode? */
3768 if (ctxt->b == 0x0f) {
3770 ctxt->b = insn_fetch(u8, ctxt);
3771 opcode = twobyte_table[ctxt->b];
3773 ctxt->d = opcode.flags;
3775 while (ctxt->d & GroupMask) {
3776 switch (ctxt->d & GroupMask) {
3778 ctxt->modrm = insn_fetch(u8, ctxt);
3780 goffset = (ctxt->modrm >> 3) & 7;
3781 opcode = opcode.u.group[goffset];
3784 ctxt->modrm = insn_fetch(u8, ctxt);
3786 goffset = (ctxt->modrm >> 3) & 7;
3787 if ((ctxt->modrm >> 6) == 3)
3788 opcode = opcode.u.gdual->mod3[goffset];
3790 opcode = opcode.u.gdual->mod012[goffset];
3793 goffset = ctxt->modrm & 7;
3794 opcode = opcode.u.group[goffset];
3797 if (ctxt->rep_prefix && op_prefix)
3798 return EMULATION_FAILED;
3799 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
3800 switch (simd_prefix) {
3801 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
3802 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
3803 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
3804 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
3808 return EMULATION_FAILED;
3811 ctxt->d &= ~(u64)GroupMask;
3812 ctxt->d |= opcode.flags;
3815 ctxt->execute = opcode.u.execute;
3816 ctxt->check_perm = opcode.check_perm;
3817 ctxt->intercept = opcode.intercept;
3820 if (ctxt->d == 0 || (ctxt->d & Undefined))
3821 return EMULATION_FAILED;
3823 if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
3824 return EMULATION_FAILED;
3826 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
3829 if (ctxt->d & Op3264) {
3830 if (mode == X86EMUL_MODE_PROT64)
3837 ctxt->op_bytes = 16;
3839 /* ModRM and SIB bytes. */
3840 if (ctxt->d & ModRM) {
3841 rc = decode_modrm(ctxt, &ctxt->memop);
3842 if (!ctxt->has_seg_override)
3843 set_seg_override(ctxt, ctxt->modrm_seg);
3844 } else if (ctxt->d & MemAbs)
3845 rc = decode_abs(ctxt, &ctxt->memop);
3846 if (rc != X86EMUL_CONTINUE)
3849 if (!ctxt->has_seg_override)
3850 set_seg_override(ctxt, VCPU_SREG_DS);
3852 ctxt->memop.addr.mem.seg = seg_override(ctxt);
3854 if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
3855 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
3858 * Decode and fetch the source operand: register, memory
3861 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
3862 if (rc != X86EMUL_CONTINUE)
3866 * Decode and fetch the second source operand: register, memory
3869 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
3870 if (rc != X86EMUL_CONTINUE)
3873 /* Decode and fetch the destination operand: register or memory. */
3874 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
3877 if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
3878 ctxt->memopp->addr.mem.ea += ctxt->_eip;
3880 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
3883 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3885 /* The second termination condition only applies for REPE
3886 * and REPNE. Test if the repeat string operation prefix is
3887 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
3888 * corresponding termination condition according to:
3889 * - if REPE/REPZ and ZF = 0 then done
3890 * - if REPNE/REPNZ and ZF = 1 then done
3892 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
3893 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
3894 && (((ctxt->rep_prefix == REPE_PREFIX) &&
3895 ((ctxt->eflags & EFLG_ZF) == 0))
3896 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
3897 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
3903 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3905 struct x86_emulate_ops *ops = ctxt->ops;
3907 int rc = X86EMUL_CONTINUE;
3908 int saved_dst_type = ctxt->dst.type;
3910 ctxt->mem_read.pos = 0;
3912 if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
3913 rc = emulate_ud(ctxt);
3917 /* LOCK prefix is allowed only with some instructions */
3918 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
3919 rc = emulate_ud(ctxt);
3923 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
3924 rc = emulate_ud(ctxt);
3929 && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
3930 || !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
3931 rc = emulate_ud(ctxt);
3935 if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
3936 rc = emulate_nm(ctxt);
3940 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3941 rc = emulator_check_intercept(ctxt, ctxt->intercept,
3942 X86_ICPT_PRE_EXCEPT);
3943 if (rc != X86EMUL_CONTINUE)
3947 /* Privileged instruction can be executed only in CPL=0 */
3948 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
3949 rc = emulate_gp(ctxt, 0);
3953 /* Instruction can only be executed in protected mode */
3954 if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
3955 rc = emulate_ud(ctxt);
3959 /* Do instruction specific permission checks */
3960 if (ctxt->check_perm) {
3961 rc = ctxt->check_perm(ctxt);
3962 if (rc != X86EMUL_CONTINUE)
3966 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3967 rc = emulator_check_intercept(ctxt, ctxt->intercept,
3968 X86_ICPT_POST_EXCEPT);
3969 if (rc != X86EMUL_CONTINUE)
3973 if (ctxt->rep_prefix && (ctxt->d & String)) {
3974 /* All REP prefixes have the same first termination condition */
3975 if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
3976 ctxt->eip = ctxt->_eip;
3981 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
3982 rc = segmented_read(ctxt, ctxt->src.addr.mem,
3983 ctxt->src.valptr, ctxt->src.bytes);
3984 if (rc != X86EMUL_CONTINUE)
3986 ctxt->src.orig_val64 = ctxt->src.val64;
3989 if (ctxt->src2.type == OP_MEM) {
3990 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
3991 &ctxt->src2.val, ctxt->src2.bytes);
3992 if (rc != X86EMUL_CONTINUE)
3996 if ((ctxt->d & DstMask) == ImplicitOps)
4000 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
4001 /* optimisation - avoid slow emulated read if Mov */
4002 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4003 &ctxt->dst.val, ctxt->dst.bytes);
4004 if (rc != X86EMUL_CONTINUE)
4007 ctxt->dst.orig_val = ctxt->dst.val;
4011 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
4012 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4013 X86_ICPT_POST_MEMACCESS);
4014 if (rc != X86EMUL_CONTINUE)
4018 if (ctxt->execute) {
4019 rc = ctxt->execute(ctxt);
4020 if (rc != X86EMUL_CONTINUE)
4029 case 0x40 ... 0x47: /* inc r16/r32 */
4030 emulate_1op(ctxt, "inc");
4032 case 0x48 ... 0x4f: /* dec r16/r32 */
4033 emulate_1op(ctxt, "dec");
4035 case 0x63: /* movsxd */
4036 if (ctxt->mode != X86EMUL_MODE_PROT64)
4037 goto cannot_emulate;
4038 ctxt->dst.val = (s32) ctxt->src.val;
4040 case 0x6c: /* insb */
4041 case 0x6d: /* insw/insd */
4042 ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
4044 case 0x6e: /* outsb */
4045 case 0x6f: /* outsw/outsd */
4046 ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
4049 case 0x70 ... 0x7f: /* jcc (short) */
4050 if (test_cc(ctxt->b, ctxt->eflags))
4051 rc = jmp_rel(ctxt, ctxt->src.val);
4053 case 0x8d: /* lea r16/r32, m */
4054 ctxt->dst.val = ctxt->src.addr.mem.ea;
4056 case 0x8f: /* pop (sole member of Grp1a) */
4057 rc = em_grp1a(ctxt);
4059 case 0x90 ... 0x97: /* nop / xchg reg, rax */
4060 if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
4064 case 0x98: /* cbw/cwde/cdqe */
4065 switch (ctxt->op_bytes) {
4066 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4067 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4068 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
4074 case 0xcc: /* int3 */
4075 rc = emulate_int(ctxt, 3);
4077 case 0xcd: /* int n */
4078 rc = emulate_int(ctxt, ctxt->src.val);
4080 case 0xce: /* into */
4081 if (ctxt->eflags & EFLG_OF)
4082 rc = emulate_int(ctxt, 4);
4084 case 0xd0 ... 0xd1: /* Grp2 */
4087 case 0xd2 ... 0xd3: /* Grp2 */
4088 ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
4091 case 0xe4: /* inb */
4094 case 0xe6: /* outb */
4095 case 0xe7: /* out */
4097 case 0xe9: /* jmp rel */
4098 case 0xeb: /* jmp rel short */
4099 rc = jmp_rel(ctxt, ctxt->src.val);
4100 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4102 case 0xec: /* in al,dx */
4103 case 0xed: /* in (e/r)ax,dx */
4105 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
4107 goto done; /* IO is needed */
4109 case 0xee: /* out dx,al */
4110 case 0xef: /* out dx,(e/r)ax */
4112 ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
4114 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4116 case 0xf4: /* hlt */
4117 ctxt->ops->halt(ctxt);
4119 case 0xf5: /* cmc */
4120 /* complement carry flag from eflags reg */
4121 ctxt->eflags ^= EFLG_CF;
4123 case 0xf8: /* clc */
4124 ctxt->eflags &= ~EFLG_CF;
4126 case 0xf9: /* stc */
4127 ctxt->eflags |= EFLG_CF;
4129 case 0xfc: /* cld */
4130 ctxt->eflags &= ~EFLG_DF;
4132 case 0xfd: /* std */
4133 ctxt->eflags |= EFLG_DF;
4135 case 0xfe: /* Grp4 */
4136 rc = em_grp45(ctxt);
4138 case 0xff: /* Grp5 */
4139 rc = em_grp45(ctxt);
4142 goto cannot_emulate;
4145 if (rc != X86EMUL_CONTINUE)
4149 rc = writeback(ctxt);
4150 if (rc != X86EMUL_CONTINUE)
4154 * restore dst type in case the decoding will be reused
4155 * (happens for string instruction )
4157 ctxt->dst.type = saved_dst_type;
4159 if ((ctxt->d & SrcMask) == SrcSI)
4160 string_addr_inc(ctxt, seg_override(ctxt),
4161 VCPU_REGS_RSI, &ctxt->src);
4163 if ((ctxt->d & DstMask) == DstDI)
4164 string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
4167 if (ctxt->rep_prefix && (ctxt->d & String)) {
4168 struct read_cache *r = &ctxt->io_read;
4169 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
4171 if (!string_insn_completed(ctxt)) {
4173 * Re-enter guest when pio read ahead buffer is empty
4174 * or, if it is not used, after each 1024 iteration.
4176 if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
4177 (r->end == 0 || r->end != r->pos)) {
4179 * Reset read cache. Usually happens before
4180 * decode, but since instruction is restarted
4181 * we have to do it here.
4183 ctxt->mem_read.end = 0;
4184 return EMULATION_RESTART;
4186 goto done; /* skip rip writeback */
4190 ctxt->eip = ctxt->_eip;
4193 if (rc == X86EMUL_PROPAGATE_FAULT)
4194 ctxt->have_exception = true;
4195 if (rc == X86EMUL_INTERCEPTED)
4196 return EMULATION_INTERCEPTED;
4198 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
4202 case 0x09: /* wbinvd */
4203 (ctxt->ops->wbinvd)(ctxt);
4205 case 0x08: /* invd */
4206 case 0x0d: /* GrpP (prefetch) */
4207 case 0x18: /* Grp16 (prefetch/nop) */
4209 case 0x20: /* mov cr, reg */
4210 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
4212 case 0x21: /* mov from dr to reg */
4213 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
4215 case 0x22: /* mov reg, cr */
4216 if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
4217 emulate_gp(ctxt, 0);
4218 rc = X86EMUL_PROPAGATE_FAULT;
4221 ctxt->dst.type = OP_NONE;
4223 case 0x23: /* mov from reg to dr */
4224 if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
4225 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
4226 ~0ULL : ~0U)) < 0) {
4227 /* #UD condition is already handled by the code above */
4228 emulate_gp(ctxt, 0);
4229 rc = X86EMUL_PROPAGATE_FAULT;
4233 ctxt->dst.type = OP_NONE; /* no writeback */
4237 msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
4238 | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
4239 if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
4240 emulate_gp(ctxt, 0);
4241 rc = X86EMUL_PROPAGATE_FAULT;
4244 rc = X86EMUL_CONTINUE;
4248 if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
4249 emulate_gp(ctxt, 0);
4250 rc = X86EMUL_PROPAGATE_FAULT;
4253 ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
4254 ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
4256 rc = X86EMUL_CONTINUE;
4258 case 0x40 ... 0x4f: /* cmov */
4259 ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
4260 if (!test_cc(ctxt->b, ctxt->eflags))
4261 ctxt->dst.type = OP_NONE; /* no writeback */
4263 case 0x80 ... 0x8f: /* jnz rel, etc*/
4264 if (test_cc(ctxt->b, ctxt->eflags))
4265 rc = jmp_rel(ctxt, ctxt->src.val);
4267 case 0x90 ... 0x9f: /* setcc r/m8 */
4268 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
4272 ctxt->dst.type = OP_NONE;
4273 /* only subword offset */
4274 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
4275 emulate_2op_SrcV_nobyte(ctxt, "bt");
4277 case 0xa4: /* shld imm8, r, r/m */
4278 case 0xa5: /* shld cl, r, r/m */
4279 emulate_2op_cl(ctxt, "shld");
4283 emulate_2op_SrcV_nobyte(ctxt, "bts");
4285 case 0xac: /* shrd imm8, r, r/m */
4286 case 0xad: /* shrd cl, r, r/m */
4287 emulate_2op_cl(ctxt, "shrd");
4289 case 0xae: /* clflush */
4291 case 0xb0 ... 0xb1: /* cmpxchg */
4293 * Save real source value, then compare EAX against
4296 ctxt->src.orig_val = ctxt->src.val;
4297 ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
4298 emulate_2op_SrcV(ctxt, "cmp");
4299 if (ctxt->eflags & EFLG_ZF) {
4300 /* Success: write back to memory. */
4301 ctxt->dst.val = ctxt->src.orig_val;
4303 /* Failure: write the value we saw to EAX. */
4304 ctxt->dst.type = OP_REG;
4305 ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
4310 emulate_2op_SrcV_nobyte(ctxt, "btr");
4312 case 0xb6 ... 0xb7: /* movzx */
4313 ctxt->dst.bytes = ctxt->op_bytes;
4314 ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
4315 : (u16) ctxt->src.val;
4317 case 0xba: /* Grp8 */
4318 switch (ctxt->modrm_reg & 3) {
4331 emulate_2op_SrcV_nobyte(ctxt, "btc");
4333 case 0xbc: { /* bsf */
4335 __asm__ ("bsf %2, %0; setz %1"
4336 : "=r"(ctxt->dst.val), "=q"(zf)
4337 : "r"(ctxt->src.val));
4338 ctxt->eflags &= ~X86_EFLAGS_ZF;
4340 ctxt->eflags |= X86_EFLAGS_ZF;
4341 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4345 case 0xbd: { /* bsr */
4347 __asm__ ("bsr %2, %0; setz %1"
4348 : "=r"(ctxt->dst.val), "=q"(zf)
4349 : "r"(ctxt->src.val));
4350 ctxt->eflags &= ~X86_EFLAGS_ZF;
4352 ctxt->eflags |= X86_EFLAGS_ZF;
4353 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4357 case 0xbe ... 0xbf: /* movsx */
4358 ctxt->dst.bytes = ctxt->op_bytes;
4359 ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
4360 (s16) ctxt->src.val;
4362 case 0xc0 ... 0xc1: /* xadd */
4363 emulate_2op_SrcV(ctxt, "add");
4364 /* Write back the register source. */
4365 ctxt->src.val = ctxt->dst.orig_val;
4366 write_register_operand(&ctxt->src);
4368 case 0xc3: /* movnti */
4369 ctxt->dst.bytes = ctxt->op_bytes;
4370 ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
4371 (u64) ctxt->src.val;
4373 case 0xc7: /* Grp9 (cmpxchg8b) */
4377 goto cannot_emulate;
4380 if (rc != X86EMUL_CONTINUE)
4386 return EMULATION_FAILED;