/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "x86.h"
#include "tss.h"
/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22)	/* Vendor specific instruction */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
/* Source 2 operand type */
#define Src2Shift   (29)
#define Src2None    (OpNone << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
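
/*
 * Illustrative example (hypothetical entry name, not from this file):
 * writing
 *
 *	X16(OPCODE_ENTRY)
 *
 * stamps out sixteen copies of OPCODE_ENTRY, which is how the opcode
 * tables fill long runs of identical entries without repetition.
 */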
struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		struct opcode *group;
		struct group_dual *gdual;
		struct gprefix *gprefix;
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"push %"_tmp"; "						\
	"push %"_tmp"; "						\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"pushf; "							\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "						\
	"orl  %"_LO32 _tmp",("_STK"); "					\
	"popf; "							\
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_LO32 _tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "
#define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype)		\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" ((ctxt)->eflags),			\
			  "+q" (*(_dsttype*)&(ctxt)->dst.val),		\
			  "=&r" (_tmp)					\
			: _y ((ctxt)->src.val), "i" (EFLAGS_MASK));	\
	} while (0)
/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy)		\
	do {								\
		unsigned long _tmp;					\
									\
		switch ((ctxt)->dst.bytes) {				\
		case 2:							\
			____emulate_2op(ctxt,_op,_wx,_wy,"w",u16);	\
			break;						\
		case 4:							\
			____emulate_2op(ctxt,_op,_lx,_ly,"l",u32);	\
			break;						\
		case 8:							\
			ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy)		\
	do {								\
		unsigned long _tmp;					\
		switch ((ctxt)->dst.bytes) {				\
		case 1:							\
			____emulate_2op(ctxt,_op,_bx,_by,"b",u8);	\
			break;						\
		default:						\
			__emulate_2op_nobyte(ctxt, _op,			\
					     _wx, _wy, _lx, _ly, _qx, _qy); \
			break;						\
		}							\
	} while (0)
/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(ctxt, _op)					\
	__emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(ctxt, _op)					\
	__emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(ctxt, _op)				\
	__emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
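
/*
 * Illustrative example (not in the original source): emulate_2op_SrcV(ctxt,
 * "add") selects the operand suffix from ctxt->dst.bytes, so a 4-byte
 * destination ends up executing "addl %src,%dst" bracketed by the
 * _PRE_EFLAGS/_POST_EFLAGS sequence above.
 */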
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(ctxt, _op, _suffix, _type)			\
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (ctxt)->src2.val;				\
		_type _srcv = (ctxt)->src.val;				\
		_type _dstv = (ctxt)->dst.val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(ctxt)->src2.val = (unsigned long) _clv;		\
		(ctxt)->src.val = (unsigned long) _srcv;		\
		(ctxt)->dst.val = (unsigned long) _dstv;		\
	} while (0)

#define emulate_2op_cl(ctxt, _op)					\
	do {								\
		switch ((ctxt)->dst.bytes) {				\
		case 2:							\
			__emulate_2op_cl(ctxt, _op, "w", u16);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(ctxt, _op, "l", u32);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(ctxt, _op, "q", ulong));	\
			break;						\
		}							\
	} while (0)
#define __emulate_1op(ctxt, _op, _suffix)				\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(ctxt, _op)						\
	do {								\
		switch ((ctxt)->dst.bytes) {				\
		case 1:	__emulate_1op(ctxt, _op, "b"); break;		\
		case 2:	__emulate_1op(ctxt, _op, "w"); break;		\
		case 4:	__emulate_1op(ctxt, _op, "l"); break;		\
		case 8:	ON64(__emulate_1op(ctxt, _op, "q")); break;	\
		}							\
	} while (0)
#define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex)			\
	do {								\
		unsigned long _tmp;					\
		ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX];		\
		ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX];		\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "1")			\
			"1: \n\t"					\
			_op _suffix " %6; "				\
			"2: \n\t"					\
			_POST_EFLAGS("0", "5", "1")			\
			".pushsection .fixup,\"ax\" \n\t"		\
			"3: movb $1, %4 \n\t"				\
			"jmp 2b \n\t"					\
			".popsection \n\t"				\
			_ASM_EXTABLE(1b, 3b)				\
			: "=m" ((ctxt)->eflags), "=&r" (_tmp),		\
			  "+a" (*rax), "+d" (*rdx), "+qm"(_ex)		\
			: "i" (EFLAGS_MASK), "m" ((ctxt)->src.val),	\
			  "a" (*rax), "d" (*rdx));			\
	} while (0)

/* Instruction has only one source operand; destination is implicit (e.g. mul, div, imul, idiv). */
#define emulate_1op_rax_rdx(ctxt, _op, _ex)				\
	do {								\
		switch((ctxt)->src.bytes) {				\
		case 1:							\
			__emulate_1op_rax_rdx(ctxt, _op, "b", _ex);	\
			break;						\
		case 2:							\
			__emulate_1op_rax_rdx(ctxt, _op, "w", _ex);	\
			break;						\
		case 4:							\
			__emulate_1op_rax_rdx(ctxt, _op, "l", _ex);	\
			break;						\
		case 8: ON64(						\
			__emulate_1op_rax_rdx(ctxt, _op, "q", _ex));	\
			break;						\
		}							\
	} while (0)
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
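
/*
 * Worked example (illustrative): ad_bytes == 2 gives (1UL << 16) - 1 ==
 * 0xffff, and ad_bytes == 4 gives 0xffffffff, i.e. the mask matching the
 * current address size.
 */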
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
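
/*
 * Worked example (illustrative): a descriptor with g == 1 and
 * limit == 0xfffff scales to (0xfffff << 12) | 0xfff == 0xffffffff (4GB).
 */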
static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
{
	ctxt->has_seg_override = true;
	ctxt->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
{
	if (!ctxt->has_seg_override)
		return 0;

	return ctxt->seg_override;
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
				 bool cs_l)
{
	switch (ctxt->op_bytes) {
	case 2:
		ctxt->_eip = (u16)dst;
		break;
	case 4:
		ctxt->_eip = (u32)dst;
		break;
#ifdef CONFIG_X86_64
	case 8:
		if ((cs_l && is_noncanonical_address(dst)) ||
		    (!cs_l && (dst & ~(u32)-1)))
			return emulate_gp(ctxt, 0);
		ctxt->_eip = dst;
		break;
#endif
	default:
		WARN(1, "unsupported eip assignment size\n");
	}
	return X86EMUL_CONTINUE;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
static int __linearize(struct x86_emulate_ctxt *ctxt,
		       struct segmented_address addr,
		       unsigned size, bool write, bool fetch,
		       ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl, rpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		break;
	case X86EMUL_MODE_PROT64:
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment or read-only data segment */
		if (((desc.type & 8) || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		}
		cpl = ctxt->ops->cpl(ctxt);
		rpl = sel & 3;
		cpl = max(cpl, rpl);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, addr.seg);
	else
		return emulate_gp(ctxt, addr.seg);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
 * Fetch the next byte of the instruction being emulated which is pointed to
 * by ctxt->_eip, then increment ctxt->_eip.
 *
 * Also prefetch the remaining bytes of the instruction without crossing a
 * page boundary if they are not in the fetch_cache yet.
 */
static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->fetch;
	int rc;
	int size, cur_size;

	if (ctxt->_eip == fc->end) {
		unsigned long linear;
		struct segmented_address addr = { .seg = VCPU_SREG_CS,
						  .ea  = ctxt->_eip };
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size,
			   PAGE_SIZE - offset_in_page(ctxt->_eip));
		rc = __linearize(ctxt, addr, size, false, true, &linear);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
				      size, &ctxt->exception);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		fc->end += size;
	}
	*dest = fc->data[ctxt->_eip - fc->start];
	ctxt->_eip++;
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_insn_fetch_byte(ctxt, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(_ctxt, &_x, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_type)_x;							\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({	rc = do_insn_fetch(_ctxt, _arr, (_size));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
})
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}
static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
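
/*
 * Worked example (illustrative): condition 0x4 (JZ/JE) has
 * (condition & 15) >> 1 == 2, so rc tests EFLG_ZF; condition 0x5 (JNZ/JNE)
 * hits the same case, but the odd lsb inverts the result.
 */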
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op,
				    int inhibit_bytereg)
{
	unsigned reg = ctxt->modrm_reg;
	int highbyte_regs = ctxt->rex_prefix == 0;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}

	op->type = OP_REG;
	if ((ctxt->d & ByteOp) && !inhibit_bytereg) {
		op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
		op->bytes = 1;
	} else {
		op->addr.reg = decode_register(reg, ctxt->regs, 0);
		op->bytes = ctxt->op_bytes;
	}
	fetch_register_operand(op);
	op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (ctxt->rex_prefix) {
		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (ctxt->rex_prefix & 2) << 2;	/* REX.X */
		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
	}

	ctxt->modrm = insn_fetch(u8, ctxt);
	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt->modrm_rm,
					       ctxt->regs, ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = ctxt->regs[VCPU_REGS_RBX];
		unsigned bp = ctxt->regs[VCPU_REGS_RBP];
		unsigned si = ctxt->regs[VCPU_REGS_RSI];
		unsigned di = ctxt->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else
				modrm_ea += ctxt->regs[base_reg];
			if (index_reg != 4)
				modrm_ea += ctxt->regs[index_reg] << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else
			modrm_ea += ctxt->regs[ctxt->modrm_rm];
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}
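
/*
 * Worked example (illustrative): in 32-bit mode, ModRM byte 0x45 decodes
 * as mod == 1, reg == 0, rm == 5, i.e. a memory operand at [EBP + disp8],
 * with the disp8 pulled in by insn_fetch(s8, ...) above; with mod == 0 the
 * same rm == 5 instead encodes a bare disp32 (or RIP-relative addressing
 * in 64-bit mode, as handled above).
 */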
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~(ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
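
/*
 * Worked example (illustrative): "bt %ax, mem" (dst.bytes == 2) with a bit
 * offset of 100 gives mask == ~15 and sv == 96, so the effective address
 * is advanced by 96 >> 3 == 12 bytes and src.val is reduced to bit 4
 * within the addressed word.
 */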
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
					      &ctxt->exception);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}
static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
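
/*
 * Example (illustrative): "rep insw" with RCX == 1000 and RDI 100 bytes
 * below a page boundary reads ahead min(100, sizeof(rc->data)) / 2 words
 * in a single ops->pio_in_emulated() call, and the following iterations
 * are satisfied straight from rc->data.
 */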
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	struct x86_emulate_ops *ops = ctxt->ops;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(ctxt, dt);
}
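
/*
 * Worked example (illustrative): selector 0x000c has the TI bit (bit 2)
 * set, so its descriptor is looked up in the LDT; selector 0x0010 (TI
 * clear) indexes the GDT instead.
 */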
/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}
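
/*
 * Worked example (illustrative): selector 0x0010 has index 2, so its
 * 8-byte descriptor lives at dt.address + 16; the dt.size check above
 * rejects it if the table is shorter than 23 bytes.
 */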
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load a system descriptor into an ordinary segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or segment's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment, or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, NULL);
}
static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	switch (ctxt->dst.type) {
	case OP_REG:
		write_register_operand(&ctxt->dst);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			rc = segmented_cmpxchg(ctxt,
					       ctxt->dst.addr.mem,
					       &ctxt->dst.orig_val,
					       &ctxt->dst.val,
					       ctxt->dst.bytes);
		else
			rc = segmented_write(ctxt,
					     ctxt->dst.addr.mem,
					     &ctxt->dst.val,
					     ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static int em_push(struct x86_emulate_ctxt *ctxt)
{
	struct segmented_address addr;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}
	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
						   ctxt->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}
static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}
static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}
static int em_grp1a(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
}

static int em_grp2(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB(ctxt, "rol");
		break;
	case 1:	/* ror */
		emulate_2op_SrcB(ctxt, "ror");
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB(ctxt, "rcl");
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB(ctxt, "rcr");
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB(ctxt, "sal");
		break;
	case 5:	/* shr */
		emulate_2op_SrcB(ctxt, "shr");
		break;
	case 7:	/* sar */
		emulate_2op_SrcB(ctxt, "sar");
		break;
	}
	return X86EMUL_CONTINUE;
}
static int em_not(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ~ctxt->dst.val;
	return X86EMUL_CONTINUE;
}

static int em_neg(struct x86_emulate_ctxt *ctxt)
{
	emulate_1op(ctxt, "neg");
	return X86EMUL_CONTINUE;
}

static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 ex = 0;

	emulate_1op_rax_rdx(ctxt, "mul", ex);
	return X86EMUL_CONTINUE;
}

static int em_imul_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 ex = 0;

	emulate_1op_rax_rdx(ctxt, "imul", ex);
	return X86EMUL_CONTINUE;
}

static int em_div_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 de = 0;

	emulate_1op_rax_rdx(ctxt, "div", de);
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_idiv_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 de = 0;

	emulate_1op_rax_rdx(ctxt, "idiv", de);
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->modrm_reg) {
	case 0:	/* inc */
		emulate_1op(ctxt, "inc");
		break;
	case 1:	/* dec */
		emulate_1op(ctxt, "dec");
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = ctxt->_eip;
		rc = assign_eip_near(ctxt, ctxt->src.val);
		if (rc != X86EMUL_CONTINUE)
			break;
		ctxt->src.val = old_eip;
		rc = em_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		rc = assign_eip_near(ctxt, ctxt->src.val);
		break;
	case 5: /* jmp far */
		rc = em_jmp_far(ctxt);
		break;
	case 6:	/* push */
		rc = em_push(ctxt);
		break;
	}
	return rc;
}

static int em_grp9(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
		ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
			(u32) ctxt->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}
static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	u16 selector;

	memset(cs, 0, sizeof(struct desc_struct));
	ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
		/*
		 * Intel ("GenuineIntel")
		 * remark: Intel CPUs only support "syscall" in 64-bit
		 * longmode. Also, a 64-bit guest with a 32-bit compat
		 * app running will #UD! While this behaviour can be
		 * fixed (by emulating) into the AMD response, AMD CPUs
		 * can't behave like Intel.
		 */
		if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
		    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
		    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
			return false;

		/* AMD ("AuthenticAMD") */
		if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
		    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
		    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
			return true;

		/* AMD ("AMDisbetter!") */
		if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
		    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
		    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
			return true;
	}

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/* XXX sysenter/sysexit have not been tested in 64-bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	ctxt->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = ctxt->regs[VCPU_REGS_RCX];
	rdx = ctxt->regs[VCPU_REGS_RDX];

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (is_noncanonical_address(rcx) ||
		    is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	ctxt->regs[VCPU_REGS_RSP] = rcx;

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ctxt->ops->cpl(ctxt) > iopl;
}
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}
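
/*
 * Worked example (illustrative): for "inb" from port 0x3f9 (len == 1),
 * the word read at base + io_bitmap_ptr + 0x3f9/8 is tested with
 * bit_idx == (0x3f9 & 7) == 1 and mask == 1; a set bit denies access.
 */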
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = ctxt->regs[VCPU_REGS_RAX];
	tss->cx = ctxt->regs[VCPU_REGS_RCX];
	tss->dx = ctxt->regs[VCPU_REGS_RDX];
	tss->bx = ctxt->regs[VCPU_REGS_RBX];
	tss->sp = ctxt->regs[VCPU_REGS_RSP];
	tss->bp = ctxt->regs[VCPU_REGS_RBP];
	tss->si = ctxt->regs[VCPU_REGS_RSI];
	tss->di = ctxt->regs[VCPU_REGS_RDI];

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	ctxt->regs[VCPU_REGS_RAX] = tss->ax;
	ctxt->regs[VCPU_REGS_RCX] = tss->cx;
	ctxt->regs[VCPU_REGS_RDX] = tss->dx;
	ctxt->regs[VCPU_REGS_RBX] = tss->bx;
	ctxt->regs[VCPU_REGS_RSP] = tss->sp;
	ctxt->regs[VCPU_REGS_RBP] = tss->bp;
	ctxt->regs[VCPU_REGS_RSI] = tss->si;
	ctxt->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = ctxt->regs[VCPU_REGS_RAX];
	tss->ecx = ctxt->regs[VCPU_REGS_RCX];
	tss->edx = ctxt->regs[VCPU_REGS_RDX];
	tss->ebx = ctxt->regs[VCPU_REGS_RBX];
	tss->esp = ctxt->regs[VCPU_REGS_RSP];
	tss->ebp = ctxt->regs[VCPU_REGS_RBP];
	tss->esi = ctxt->regs[VCPU_REGS_RSI];
	tss->edi = ctxt->regs[VCPU_REGS_RDI];

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	ctxt->regs[VCPU_REGS_RAX] = tss->eax;
	ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
	ctxt->regs[VCPU_REGS_RDX] = tss->edx;
	ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
	ctxt->regs[VCPU_REGS_RSP] = tss->esp;
	ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
	ctxt->regs[VCPU_REGS_RSI] = tss->esi;
	ctxt->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
2443 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2444 u16 tss_selector, u16 old_tss_sel,
2445 ulong old_tss_base, struct desc_struct *new_desc)
2447 struct x86_emulate_ops *ops = ctxt->ops;
2448 struct tss_segment_32 tss_seg;
2450 u32 new_tss_base = get_desc_base(new_desc);
2452 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2454 if (ret != X86EMUL_CONTINUE)
2455 /* FIXME: need to provide precise fault address */
2458 save_state_to_tss32(ctxt, &tss_seg);
2460 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2462 if (ret != X86EMUL_CONTINUE)
2463 /* FIXME: need to provide precise fault address */
2466 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2468 if (ret != X86EMUL_CONTINUE)
2469 /* FIXME: need to provide precise fault address */
2472 if (old_tss_sel != 0xffff) {
2473 tss_seg.prev_task_link = old_tss_sel;
2475 ret = ops->write_std(ctxt, new_tss_base,
2476 &tss_seg.prev_task_link,
2477 sizeof tss_seg.prev_task_link,
2479 if (ret != X86EMUL_CONTINUE)
2480 /* FIXME: need to provide precise fault address */
2484 return load_state_from_tss32(ctxt, &tss_seg);
2487 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2488 u16 tss_selector, int reason,
2489 bool has_error_code, u32 error_code)
2491 struct x86_emulate_ops *ops = ctxt->ops;
2492 struct desc_struct curr_tss_desc, next_tss_desc;
2494 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2495 ulong old_tss_base =
2496 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2499 /* FIXME: old_tss_base == ~0 ? */
2501 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2502 if (ret != X86EMUL_CONTINUE)
2504 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2505 if (ret != X86EMUL_CONTINUE)
2508 /* FIXME: check that next_tss_desc is a TSS */
2510 if (reason != TASK_SWITCH_IRET) {
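/*
 * Per the SDM, for a task switch via CALL/JMP/task gate both the
 * selector's RPL and the current CPL must not exceed the DPL of the
 * target TSS descriptor.
 */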
2511 if ((tss_selector & 3) > next_tss_desc.dpl ||
2512 ops->cpl(ctxt) > next_tss_desc.dpl)
2513 return emulate_gp(ctxt, 0);
2516 desc_limit = desc_limit_scaled(&next_tss_desc);
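/*
 * Minimum limits: a 32-bit TSS (type & 8) is 0x68 bytes, so its limit
 * must be at least 0x67; a 16-bit TSS is 0x2c bytes, so its limit must
 * be at least 0x2b.
 */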
2517 if (!next_tss_desc.p ||
2518 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2519 desc_limit < 0x2b)) {
2520 emulate_ts(ctxt, tss_selector & 0xfffc);
2521 return X86EMUL_PROPAGATE_FAULT;
2524 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2525 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2526 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2529 if (reason == TASK_SWITCH_IRET)
2530 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2532 /* Set the back link to the previous task only if the NT bit is set
2533 in EFLAGS; note that old_tss_sel is not used after this point. */
2534 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2535 old_tss_sel = 0xffff;
2537 if (next_tss_desc.type & 8)
2538 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2539 old_tss_base, &next_tss_desc);
2541 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2542 old_tss_base, &next_tss_desc);
2543 if (ret != X86EMUL_CONTINUE)
2546 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2547 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2549 if (reason != TASK_SWITCH_IRET) {
2550 next_tss_desc.type |= (1 << 1); /* set busy flag */
2551 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2554 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2555 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2557 if (has_error_code) {
2558 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2559 ctxt->lock_prefix = 0;
2560 ctxt->src.val = (unsigned long) error_code;
2561 ret = em_push(ctxt);
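/*
 * Host-side entry point for emulated task switches; in this kernel the
 * caller is kvm_task_switch() in x86.c, which supplies the target TSS
 * selector, the switch reason, and an optional error code to push.
 */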
2567 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2568 u16 tss_selector, int reason,
2569 bool has_error_code, u32 error_code)
2573 ctxt->_eip = ctxt->eip;
2574 ctxt->dst.type = OP_NONE;
2576 rc = emulator_do_task_switch(ctxt, tss_selector, reason,
2577 has_error_code, error_code);
2579 if (rc == X86EMUL_CONTINUE)
2580 ctxt->eip = ctxt->_eip;
2582 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2585 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2586 int reg, struct operand *op)
2588 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
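/*
 * DF selects the direction of string operations: with DF set, a 4-byte
 * string op steps RSI/RDI down by 4 each iteration, otherwise up by 4.
 */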
2590 register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
2591 op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
2592 op->addr.mem.seg = seg;
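/*
 * DAS adjusts AL after a BCD subtraction (SDM: if (AL & 0x0f) > 9 or AF
 * is set, AL -= 6 and AF is set; then, if the original AL was above
 * 0x99 or CF was set, AL -= 0x60 and CF is set).  PF/ZF/SF are then
 * recomputed from the result via the OR idiom below.
 */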
2595 static int em_das(struct x86_emulate_ctxt *ctxt)
2598 bool af, cf, old_cf;
2600 cf = ctxt->eflags & X86_EFLAGS_CF;
2606 af = ctxt->eflags & X86_EFLAGS_AF;
2607 if ((al & 0x0f) > 9 || af) {
2609 cf = old_cf | (al >= 250);
2614 if (old_al > 0x99 || old_cf) {
2620 /* Set PF, ZF, SF */
2621 ctxt->src.type = OP_IMM;
2623 ctxt->src.bytes = 1;
2624 emulate_2op_SrcV(ctxt, "or");
2625 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2627 ctxt->eflags |= X86_EFLAGS_CF;
2629 ctxt->eflags |= X86_EFLAGS_AF;
2630 return X86EMUL_CONTINUE;
2633 static int em_call(struct x86_emulate_ctxt *ctxt)
2636 long rel = ctxt->src.val;
2638 ctxt->src.val = (unsigned long)ctxt->_eip;
2639 rc = jmp_rel(ctxt, rel);
2640 if (rc != X86EMUL_CONTINUE)
2642 return em_push(ctxt);
2645 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2650 struct desc_struct old_desc, new_desc;
2651 const struct x86_emulate_ops *ops = ctxt->ops;
2652 int cpl = ctxt->ops->cpl(ctxt);
2654 old_eip = ctxt->_eip;
2655 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
2657 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2658 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2660 if (rc != X86EMUL_CONTINUE)
2661 return rc;
2663 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2664 if (rc != X86EMUL_CONTINUE)
2667 ctxt->src.val = old_cs;
2669 if (rc != X86EMUL_CONTINUE)
2672 ctxt->src.val = old_eip;
2674 /* If we failed, we tainted the memory, but at the very least we should restore the old CS. */
2676 if (rc != X86EMUL_CONTINUE)
2680 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2685 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2690 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2691 if (rc != X86EMUL_CONTINUE)
2693 rc = assign_eip_near(ctxt, eip);
2694 if (rc != X86EMUL_CONTINUE)
2696 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
2697 return X86EMUL_CONTINUE;
2700 static int em_add(struct x86_emulate_ctxt *ctxt)
2702 emulate_2op_SrcV(ctxt, "add");
2703 return X86EMUL_CONTINUE;
2706 static int em_or(struct x86_emulate_ctxt *ctxt)
2708 emulate_2op_SrcV(ctxt, "or");
2709 return X86EMUL_CONTINUE;
2712 static int em_adc(struct x86_emulate_ctxt *ctxt)
2714 emulate_2op_SrcV(ctxt, "adc");
2715 return X86EMUL_CONTINUE;
2718 static int em_sbb(struct x86_emulate_ctxt *ctxt)
2720 emulate_2op_SrcV(ctxt, "sbb");
2721 return X86EMUL_CONTINUE;
2724 static int em_and(struct x86_emulate_ctxt *ctxt)
2726 emulate_2op_SrcV(ctxt, "and");
2727 return X86EMUL_CONTINUE;
2730 static int em_sub(struct x86_emulate_ctxt *ctxt)
2732 emulate_2op_SrcV(ctxt, "sub");
2733 return X86EMUL_CONTINUE;
2736 static int em_xor(struct x86_emulate_ctxt *ctxt)
2738 emulate_2op_SrcV(ctxt, "xor");
2739 return X86EMUL_CONTINUE;
2742 static int em_cmp(struct x86_emulate_ctxt *ctxt)
2744 emulate_2op_SrcV(ctxt, "cmp");
2745 /* Disable writeback. */
2746 ctxt->dst.type = OP_NONE;
2747 return X86EMUL_CONTINUE;
2750 static int em_test(struct x86_emulate_ctxt *ctxt)
2752 emulate_2op_SrcV(ctxt, "test");
2753 /* Disable writeback. */
2754 ctxt->dst.type = OP_NONE;
2755 return X86EMUL_CONTINUE;
2758 static int em_xchg(struct x86_emulate_ctxt *ctxt)
2760 /* Write back the register source. */
2761 ctxt->src.val = ctxt->dst.val;
2762 write_register_operand(&ctxt->src);
2764 /* Write back the memory destination with implicit LOCK prefix. */
2765 ctxt->dst.val = ctxt->src.orig_val;
2766 ctxt->lock_prefix = 1;
2767 return X86EMUL_CONTINUE;
2770 static int em_imul(struct x86_emulate_ctxt *ctxt)
2772 emulate_2op_SrcV_nobyte(ctxt, "imul");
2773 return X86EMUL_CONTINUE;
2776 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2778 ctxt->dst.val = ctxt->src2.val;
2779 return em_imul(ctxt);
2782 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2784 ctxt->dst.type = OP_REG;
2785 ctxt->dst.bytes = ctxt->src.bytes;
2786 ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
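/*
 * Fill RDX with the sign of the accumulator: the shift isolates the
 * sign bit (0 or 1), subtracting 1 turns 1 into 0 and 0 into all-ones,
 * and the complement then yields all-ones for negative values and 0
 * for positive ones, e.g. AX = 0x8000 gives DX = 0xffff (CWD).
 */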
2787 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2789 return X86EMUL_CONTINUE;
2792 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2796 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2797 ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
2798 ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
2799 return X86EMUL_CONTINUE;
2802 static int em_mov(struct x86_emulate_ctxt *ctxt)
2804 ctxt->dst.val = ctxt->src.val;
2805 return X86EMUL_CONTINUE;
2808 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
2810 if (ctxt->modrm_reg > VCPU_SREG_GS)
2811 return emulate_ud(ctxt);
2813 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
2814 return X86EMUL_CONTINUE;
2817 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
2819 u16 sel = ctxt->src.val;
2821 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
2822 return emulate_ud(ctxt);
2824 if (ctxt->modrm_reg == VCPU_SREG_SS)
2825 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2827 /* Disable writeback. */
2828 ctxt->dst.type = OP_NONE;
2829 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
2832 static int em_movdqu(struct x86_emulate_ctxt *ctxt)
2834 memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
2835 return X86EMUL_CONTINUE;
2838 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
2843 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
2844 if (rc == X86EMUL_CONTINUE)
2845 ctxt->ops->invlpg(ctxt, linear);
2846 /* Disable writeback. */
2847 ctxt->dst.type = OP_NONE;
2848 return X86EMUL_CONTINUE;
2851 static int em_clts(struct x86_emulate_ctxt *ctxt)
2855 cr0 = ctxt->ops->get_cr(ctxt, 0);
2857 ctxt->ops->set_cr(ctxt, 0, cr0);
2858 return X86EMUL_CONTINUE;
2861 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
2865 if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
2866 return X86EMUL_UNHANDLEABLE;
2868 rc = ctxt->ops->fix_hypercall(ctxt);
2869 if (rc != X86EMUL_CONTINUE)
2872 /* Let the processor re-execute the fixed hypercall */
2873 ctxt->_eip = ctxt->eip;
2874 /* Disable writeback. */
2875 ctxt->dst.type = OP_NONE;
2876 return X86EMUL_CONTINUE;
2879 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
2881 struct desc_ptr desc_ptr;
2884 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2885 &desc_ptr.size, &desc_ptr.address,
2887 if (rc != X86EMUL_CONTINUE)
2889 ctxt->ops->set_gdt(ctxt, &desc_ptr);
2890 /* Disable writeback. */
2891 ctxt->dst.type = OP_NONE;
2892 return X86EMUL_CONTINUE;
2895 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
2899 rc = ctxt->ops->fix_hypercall(ctxt);
2901 /* Disable writeback. */
2902 ctxt->dst.type = OP_NONE;
2906 static int em_lidt(struct x86_emulate_ctxt *ctxt)
2908 struct desc_ptr desc_ptr;
2911 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2912 &desc_ptr.size, &desc_ptr.address,
2914 if (rc != X86EMUL_CONTINUE)
2916 ctxt->ops->set_idt(ctxt, &desc_ptr);
2917 /* Disable writeback. */
2918 ctxt->dst.type = OP_NONE;
2919 return X86EMUL_CONTINUE;
2922 static int em_smsw(struct x86_emulate_ctxt *ctxt)
2924 ctxt->dst.bytes = 2;
2925 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
2926 return X86EMUL_CONTINUE;
2929 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
2931 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
2932 | (ctxt->src.val & 0x0f));
2933 ctxt->dst.type = OP_NONE;
2934 return X86EMUL_CONTINUE;
2937 static int em_loop(struct x86_emulate_ctxt *ctxt)
2939 int rc = X86EMUL_CONTINUE;
2941 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
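/*
 * 0xe0 = LOOPNE, 0xe1 = LOOPE, 0xe2 = plain LOOP.  XOR-ing the opcode
 * with 5 maps 0xe0/0xe1 onto condition codes 5 ("ne") and 4 ("e"),
 * which test_cc() evaluates against ZF.
 */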
2942 if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
2943 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
2944 rc = jmp_rel(ctxt, ctxt->src.val);
2949 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
2951 int rc = X86EMUL_CONTINUE;
2953 if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
2954 rc = jmp_rel(ctxt, ctxt->src.val);
2959 static int em_cli(struct x86_emulate_ctxt *ctxt)
2961 if (emulator_bad_iopl(ctxt))
2962 return emulate_gp(ctxt, 0);
2964 ctxt->eflags &= ~X86_EFLAGS_IF;
2965 return X86EMUL_CONTINUE;
2968 static int em_sti(struct x86_emulate_ctxt *ctxt)
2970 if (emulator_bad_iopl(ctxt))
2971 return emulate_gp(ctxt, 0);
2973 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
2974 ctxt->eflags |= X86_EFLAGS_IF;
2975 return X86EMUL_CONTINUE;
2978 static bool valid_cr(int nr)
2990 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
2992 if (!valid_cr(ctxt->modrm_reg))
2993 return emulate_ud(ctxt);
2995 return X86EMUL_CONTINUE;
2998 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3000 u64 new_val = ctxt->src.val64;
3001 int cr = ctxt->modrm_reg;
3004 static u64 cr_reserved_bits[] = {
3005 0xffffffff00000000ULL,
3006 0, 0, 0, /* CR3 checked later */
3013 return emulate_ud(ctxt);
3015 if (new_val & cr_reserved_bits[cr])
3016 return emulate_gp(ctxt, 0);
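/*
 * Architectural consistency rules checked below: CR0.PG requires
 * CR0.PE, CR0.NW requires CR0.CD, and enabling paging while EFER.LME
 * is set additionally requires CR4.PAE.
 */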
3021 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3022 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3023 return emulate_gp(ctxt, 0);
3025 cr4 = ctxt->ops->get_cr(ctxt, 4);
3026 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3028 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3029 !(cr4 & X86_CR4_PAE))
3030 return emulate_gp(ctxt, 0);
3037 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3038 if (efer & EFER_LMA)
3039 rsvd = CR3_L_MODE_RESERVED_BITS;
3040 else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
3041 rsvd = CR3_PAE_RESERVED_BITS;
3042 else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
3043 rsvd = CR3_NONPAE_RESERVED_BITS;
3046 return emulate_gp(ctxt, 0);
3053 cr4 = ctxt->ops->get_cr(ctxt, 4);
3054 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3056 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3057 return emulate_gp(ctxt, 0);
3063 return X86EMUL_CONTINUE;
3066 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3070 ctxt->ops->get_dr(ctxt, 7, &dr7);
3072 /* Check if DR7.GD (general detect enable, bit 13) is set */
3073 return dr7 & (1 << 13);
3076 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3078 int dr = ctxt->modrm_reg;
3082 return emulate_ud(ctxt);
3084 cr4 = ctxt->ops->get_cr(ctxt, 4);
3085 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3086 return emulate_ud(ctxt);
3088 if (check_dr7_gd(ctxt))
3089 return emulate_db(ctxt);
3091 return X86EMUL_CONTINUE;
3094 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3096 u64 new_val = ctxt->src.val64;
3097 int dr = ctxt->modrm_reg;
3099 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3100 return emulate_gp(ctxt, 0);
3102 return check_dr_read(ctxt);
3105 static int check_svme(struct x86_emulate_ctxt *ctxt)
3109 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3111 if (!(efer & EFER_SVME))
3112 return emulate_ud(ctxt);
3114 return X86EMUL_CONTINUE;
3117 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3119 u64 rax = ctxt->regs[VCPU_REGS_RAX];
3121 /* Valid physical address? */
3122 if (rax & 0xffff000000000000ULL)
3123 return emulate_gp(ctxt, 0);
3125 return check_svme(ctxt);
3128 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3130 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3132 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3133 return emulate_ud(ctxt);
3135 return X86EMUL_CONTINUE;
3138 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3140 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3141 u64 rcx = ctxt->regs[VCPU_REGS_RCX];
3143 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3145 return emulate_gp(ctxt, 0);
3147 return X86EMUL_CONTINUE;
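/*
 * Port I/O from the emulator is allowed either by IOPL or, failing
 * that, by the I/O permission bitmap in the TSS; emulator_io_permited()
 * (defined earlier in this file) makes that determination for the given
 * port and access width.
 */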
3150 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3152 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3153 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3154 return emulate_gp(ctxt, 0);
3156 return X86EMUL_CONTINUE;
3159 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3161 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3162 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3163 return emulate_gp(ctxt, 0);
3165 return X86EMUL_CONTINUE;
3168 #define D(_y) { .flags = (_y) }
3169 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
3170 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
3171 .check_perm = (_p) }
3173 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3174 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
3175 #define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
3176 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3177 #define II(_f, _e, _i) \
3178 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
3179 #define IIP(_f, _e, _i, _p) \
3180 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
3181 .check_perm = (_p) }
3182 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3184 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3185 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3186 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3188 #define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3189 I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3190 I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
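/*
 * For example, I6ALU(Lock, em_add) expands (sketch) to:
 *
 *	I(Lock | ByteOp | DstMem | SrcReg | ModRM, em_add),
 *	I(Lock | DstMem | SrcReg | ModRM, em_add),
 *	I(ByteOp | DstReg | SrcMem | ModRM, em_add),
 *	I(DstReg | SrcMem | ModRM, em_add),
 *	I(ByteOp | DstAcc | SrcImm, em_add),
 *	I(DstAcc | SrcImm, em_add),
 *
 * i.e. the six classic ALU encodings, all dispatching to em_add().
 */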
3192 static struct opcode group7_rm1[] = {
3193 DI(SrcNone | ModRM | Priv, monitor),
3194 DI(SrcNone | ModRM | Priv, mwait),
3198 static struct opcode group7_rm3[] = {
3199 DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
3200 II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
3201 DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
3202 DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
3203 DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
3204 DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
3205 DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
3206 DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
3209 static struct opcode group7_rm7[] = {
3211 DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
3215 static struct opcode group1[] = {
3226 static struct opcode group1A[] = {
3227 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
3230 static struct opcode group3[] = {
3231 I(DstMem | SrcImm | ModRM, em_test),
3232 I(DstMem | SrcImm | ModRM, em_test),
3233 I(DstMem | SrcNone | ModRM | Lock, em_not),
3234 I(DstMem | SrcNone | ModRM | Lock, em_neg),
3235 I(SrcMem | ModRM, em_mul_ex),
3236 I(SrcMem | ModRM, em_imul_ex),
3237 I(SrcMem | ModRM, em_div_ex),
3238 I(SrcMem | ModRM, em_idiv_ex),
3241 static struct opcode group4[] = {
3242 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
3246 static struct opcode group5[] = {
3247 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
3248 D(SrcMem | ModRM | Stack),
3249 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
3250 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
3251 D(SrcMem | ModRM | Stack), N,
3254 static struct opcode group6[] = {
3255 DI(ModRM | Prot, sldt),
3256 DI(ModRM | Prot, str),
3257 DI(ModRM | Prot | Priv, lldt),
3258 DI(ModRM | Prot | Priv, ltr),
3262 static struct group_dual group7 = { {
3263 DI(ModRM | Mov | DstMem | Priv, sgdt),
3264 DI(ModRM | Mov | DstMem | Priv, sidt),
3265 II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
3266 II(ModRM | SrcMem | Priv, em_lidt, lidt),
3267 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3268 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
3269 II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3271 I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
3273 N, EXT(0, group7_rm3),
3274 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3275 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
3278 static struct opcode group8[] = {
3280 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
3281 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
3284 static struct group_dual group9 = { {
3285 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
3287 N, N, N, N, N, N, N, N,
3290 static struct opcode group11[] = {
3291 I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
3294 static struct gprefix pfx_0f_6f_0f_7f = {
3295 N, N, N, I(Sse, em_movdqu),
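/*
 * Order within a gprefix is: no prefix, 0x66, 0xf2, 0xf3.  Here only
 * the 0xf3 form (movdqu) is implemented; the unprefixed MMX movq and
 * the 0x66 movdqa forms remain undefined.
 */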
3298 static struct opcode opcode_table[256] = {
3300 I6ALU(Lock, em_add),
3301 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3302 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3305 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3308 I6ALU(Lock, em_adc),
3309 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3310 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3312 I6ALU(Lock, em_sbb),
3313 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3314 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3316 I6ALU(Lock, em_and), N, N,
3318 I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3320 I6ALU(Lock, em_xor), N, N,
3322 I6ALU(0, em_cmp), N, N,
3326 X8(I(SrcReg | Stack, em_push)),
3328 X8(I(DstReg | Stack, em_pop)),
3330 I(ImplicitOps | Stack | No64, em_pusha),
3331 I(ImplicitOps | Stack | No64, em_popa),
3332 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3335 I(SrcImm | Mov | Stack, em_push),
3336 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3337 I(SrcImmByte | Mov | Stack, em_push),
3338 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3339 D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
3340 D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
3344 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
3345 G(DstMem | SrcImm | ModRM | Group, group1),
3346 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
3347 G(DstMem | SrcImmByte | ModRM | Group, group1),
3348 I2bv(DstMem | SrcReg | ModRM, em_test),
3349 I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
3351 I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
3352 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3353 I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
3354 D(ModRM | SrcMem | NoAccess | DstReg),
3355 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3358 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3360 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3361 I(SrcImmFAddr | No64, em_call_far), N,
3362 II(ImplicitOps | Stack, em_pushf, pushf),
3363 II(ImplicitOps | Stack, em_popf, popf), N, N,
3365 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3366 I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
3367 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3368 I2bv(SrcSI | DstDI | String, em_cmp),
3370 I2bv(DstAcc | SrcImm, em_test),
3371 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3372 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3373 I2bv(SrcAcc | DstDI | String, em_cmp),
3375 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3377 X8(I(DstReg | SrcImm | Mov, em_mov)),
3379 D2bv(DstMem | SrcImmByte | ModRM),
3380 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3381 I(ImplicitOps | Stack, em_ret),
3382 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
3383 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
3384 G(ByteOp, group11), G(0, group11),
3386 N, N, N, I(ImplicitOps | Stack, em_ret_far),
3387 D(ImplicitOps), DI(SrcImmByte, intn),
3388 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3390 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
3393 N, N, N, N, N, N, N, N,
3395 X3(I(SrcImmByte, em_loop)),
3396 I(SrcImmByte, em_jcxz),
3397 D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in),
3398 D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
3400 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
3401 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
3402 D2bvIP(SrcDX | DstAcc, in, check_perm_in),
3403 D2bvIP(SrcAcc | DstDX, out, check_perm_out),
3405 N, DI(ImplicitOps, icebp), N, N,
3406 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3407 G(ByteOp, group3), G(0, group3),
3409 D(ImplicitOps), D(ImplicitOps),
3410 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
3411 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3414 static struct opcode twobyte_table[256] = {
3416 G(0, group6), GD(0, &group7), N, N,
3417 N, I(ImplicitOps | VendorSpecific, em_syscall),
3418 II(ImplicitOps | Priv, em_clts, clts), N,
3419 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3420 N, D(ImplicitOps | ModRM), N, N,
3422 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
3424 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3425 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3426 DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
3427 DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
3429 N, N, N, N, N, N, N, N,
3431 DI(ImplicitOps | Priv, wrmsr),
3432 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3433 DI(ImplicitOps | Priv, rdmsr),
3434 DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
3435 I(ImplicitOps | VendorSpecific, em_sysenter),
3436 I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
3438 N, N, N, N, N, N, N, N,
3440 X16(D(DstReg | SrcMem | ModRM | Mov)),
3442 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3447 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3452 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3456 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
3458 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
3459 DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
3460 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3461 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
3463 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
3464 DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3465 D(DstMem | SrcReg | Src2ImmByte | ModRM),
3466 D(DstMem | SrcReg | Src2CL | ModRM),
3467 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
3469 D2bv(DstMem | SrcReg | ModRM | Lock),
3470 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
3471 D(DstMem | SrcReg | ModRM | BitOp | Lock),
3472 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
3473 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
3474 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3477 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3478 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
3479 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3481 D2bv(DstMem | SrcReg | ModRM | Lock),
3482 N, D(DstMem | SrcReg | ModRM | Mov),
3483 N, N, N, GD(0, &group9),
3484 N, N, N, N, N, N, N, N,
3486 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3488 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3490 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3506 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
3510 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3516 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3517 unsigned size, bool sign_extension)
3519 int rc = X86EMUL_CONTINUE;
3523 op->addr.mem.ea = ctxt->_eip;
3524 /* NB. Immediates are sign-extended as necessary. */
3525 switch (op->bytes) {
3527 op->val = insn_fetch(s8, ctxt);
3530 op->val = insn_fetch(s16, ctxt);
3533 op->val = insn_fetch(s32, ctxt);
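/*
 * All fetches above are sign-extended (s8/s16/s32).  When zero
 * extension was requested instead, the switch below masks the value
 * back to its natural width: a one-byte immediate 0x80, fetched as
 * 0xffffffffffffff80, becomes 0x80 again.
 */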
3536 if (!sign_extension) {
3537 switch (op->bytes) {
3545 op->val &= 0xffffffff;
3553 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
3556 int rc = X86EMUL_CONTINUE;
3560 decode_register_operand(ctxt, op,
3562 ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7));
3565 rc = decode_imm(ctxt, op, 1, false);
3568 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3572 if ((ctxt->d & BitOp) && op == &ctxt->dst)
3573 fetch_bit_operand(ctxt);
3574 op->orig_val = op->val;
3577 ctxt->memop.bytes = 8;
3581 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3582 op->addr.reg = &ctxt->regs[VCPU_REGS_RAX];
3583 fetch_register_operand(op);
3584 op->orig_val = op->val;
3588 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3590 register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
3591 op->addr.mem.seg = VCPU_SREG_ES;
3597 op->addr.reg = &ctxt->regs[VCPU_REGS_RDX];
3598 fetch_register_operand(op);
3602 op->val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
3605 rc = decode_imm(ctxt, op, 1, true);
3612 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
3615 ctxt->memop.bytes = 2;
3618 ctxt->memop.bytes = 4;
3621 rc = decode_imm(ctxt, op, 2, false);
3624 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
3628 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3630 register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
3631 op->addr.mem.seg = seg_override(ctxt);
3636 op->addr.mem.ea = ctxt->_eip;
3637 op->bytes = ctxt->op_bytes + 2;
3638 insn_fetch_arr(op->valptr, op->bytes, ctxt);
3641 ctxt->memop.bytes = ctxt->op_bytes + 2;
3644 op->val = VCPU_SREG_ES;
3647 op->val = VCPU_SREG_CS;
3650 op->val = VCPU_SREG_SS;
3653 op->val = VCPU_SREG_DS;
3656 op->val = VCPU_SREG_FS;
3659 op->val = VCPU_SREG_GS;
3662 /* Special instructions do their own operand decoding. */
3664 op->type = OP_NONE; /* Disable writeback. */
3672 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
3674 int rc = X86EMUL_CONTINUE;
3675 int mode = ctxt->mode;
3676 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
3677 bool op_prefix = false;
3678 struct opcode opcode;
3680 ctxt->memop.type = OP_NONE;
3681 ctxt->memopp = NULL;
3682 ctxt->_eip = ctxt->eip;
3683 ctxt->fetch.start = ctxt->_eip;
3684 ctxt->fetch.end = ctxt->fetch.start + insn_len;
3686 memcpy(ctxt->fetch.data, insn, insn_len);
3689 case X86EMUL_MODE_REAL:
3690 case X86EMUL_MODE_VM86:
3691 case X86EMUL_MODE_PROT16:
3692 def_op_bytes = def_ad_bytes = 2;
3694 case X86EMUL_MODE_PROT32:
3695 def_op_bytes = def_ad_bytes = 4;
3697 #ifdef CONFIG_X86_64
3698 case X86EMUL_MODE_PROT64:
3704 return EMULATION_FAILED;
3707 ctxt->op_bytes = def_op_bytes;
3708 ctxt->ad_bytes = def_ad_bytes;
3710 /* Legacy prefixes. */
3712 switch (ctxt->b = insn_fetch(u8, ctxt)) {
3713 case 0x66: /* operand-size override */
3715 /* switch between 2/4 bytes */
3716 ctxt->op_bytes = def_op_bytes ^ 6;
3718 case 0x67: /* address-size override */
3719 if (mode == X86EMUL_MODE_PROT64)
3720 /* switch between 4/8 bytes */
3721 ctxt->ad_bytes = def_ad_bytes ^ 12;
3723 /* switch between 2/4 bytes */
3724 ctxt->ad_bytes = def_ad_bytes ^ 6;
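/*
 * The XOR flips between the two legal widths: 2 ^ 6 == 4 and
 * 4 ^ 6 == 2 for 16/32-bit sizes, 4 ^ 12 == 8 and 8 ^ 12 == 4 for
 * 64-bit addressing.
 */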
3726 case 0x26: /* ES override */
3727 case 0x2e: /* CS override */
3728 case 0x36: /* SS override */
3729 case 0x3e: /* DS override */
3730 set_seg_override(ctxt, (ctxt->b >> 3) & 3);
3732 case 0x64: /* FS override */
3733 case 0x65: /* GS override */
3734 set_seg_override(ctxt, ctxt->b & 7);
3736 case 0x40 ... 0x4f: /* REX */
3737 if (mode != X86EMUL_MODE_PROT64)
3739 ctxt->rex_prefix = ctxt->b;
3741 case 0xf0: /* LOCK */
3742 ctxt->lock_prefix = 1;
3744 case 0xf2: /* REPNE/REPNZ */
3745 case 0xf3: /* REP/REPE/REPZ */
3746 ctxt->rep_prefix = ctxt->b;
3752 /* Any legacy prefix after a REX prefix nullifies its effect. */
3754 ctxt->rex_prefix = 0;
3760 if (ctxt->rex_prefix & 8)
3761 ctxt->op_bytes = 8; /* REX.W */
3763 /* Opcode byte(s). */
3764 opcode = opcode_table[ctxt->b];
3765 /* Two-byte opcode? */
3766 if (ctxt->b == 0x0f) {
3768 ctxt->b = insn_fetch(u8, ctxt);
3769 opcode = twobyte_table[ctxt->b];
3771 ctxt->d = opcode.flags;
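/*
 * Group decoding: the ModRM reg field (bits 5:3) indexes into a group
 * table; "dual" groups pick the mod3 table when mod == 3 (register
 * form) and mod012 otherwise; RMExt groups index by the r/m field
 * instead.
 */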
3773 while (ctxt->d & GroupMask) {
3774 switch (ctxt->d & GroupMask) {
3776 ctxt->modrm = insn_fetch(u8, ctxt);
3778 goffset = (ctxt->modrm >> 3) & 7;
3779 opcode = opcode.u.group[goffset];
3782 ctxt->modrm = insn_fetch(u8, ctxt);
3784 goffset = (ctxt->modrm >> 3) & 7;
3785 if ((ctxt->modrm >> 6) == 3)
3786 opcode = opcode.u.gdual->mod3[goffset];
3788 opcode = opcode.u.gdual->mod012[goffset];
3791 goffset = ctxt->modrm & 7;
3792 opcode = opcode.u.group[goffset];
3795 if (ctxt->rep_prefix && op_prefix)
3796 return EMULATION_FAILED;
3797 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
3798 switch (simd_prefix) {
3799 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
3800 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
3801 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
3802 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
3806 return EMULATION_FAILED;
3809 ctxt->d &= ~(u64)GroupMask;
3810 ctxt->d |= opcode.flags;
3813 ctxt->execute = opcode.u.execute;
3814 ctxt->check_perm = opcode.check_perm;
3815 ctxt->intercept = opcode.intercept;
3818 if (ctxt->d == 0 || (ctxt->d & Undefined))
3819 return EMULATION_FAILED;
3821 if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
3822 return EMULATION_FAILED;
3824 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
3827 if (ctxt->d & Op3264) {
3828 if (mode == X86EMUL_MODE_PROT64)
3835 ctxt->op_bytes = 16;
3837 /* ModRM and SIB bytes. */
3838 if (ctxt->d & ModRM) {
3839 rc = decode_modrm(ctxt, &ctxt->memop);
3840 if (!ctxt->has_seg_override)
3841 set_seg_override(ctxt, ctxt->modrm_seg);
3842 } else if (ctxt->d & MemAbs)
3843 rc = decode_abs(ctxt, &ctxt->memop);
3844 if (rc != X86EMUL_CONTINUE)
3847 if (!ctxt->has_seg_override)
3848 set_seg_override(ctxt, VCPU_SREG_DS);
3850 ctxt->memop.addr.mem.seg = seg_override(ctxt);
3852 if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
3853 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
3856 * Decode and fetch the source operand: register, memory, or immediate. */
3859 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
3860 if (rc != X86EMUL_CONTINUE)
3864 * Decode and fetch the second source operand: register, memory, or immediate. */
3867 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
3868 if (rc != X86EMUL_CONTINUE)
3871 /* Decode and fetch the destination operand: register or memory. */
3872 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
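/*
 * RIP-relative displacements are relative to the end of the
 * instruction, so the effective address can only be finalised here,
 * once _eip has advanced past all fetched bytes.
 */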
3875 if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
3876 ctxt->memopp->addr.mem.ea += ctxt->_eip;
3878 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
3881 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3883 /* The second termination condition applies only to REPE
3884 * and REPNE. If the repeat string operation prefix is
3885 * REPE/REPZ or REPNE/REPNZ, test the corresponding
3886 * ZF-based termination condition:
3887 * - if REPE/REPZ and ZF = 0 then done
3888 * - if REPNE/REPNZ and ZF = 1 then done
3890 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
3891 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
3892 && (((ctxt->rep_prefix == REPE_PREFIX) &&
3893 ((ctxt->eflags & EFLG_ZF) == 0))
3894 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
3895 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
3901 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3903 struct x86_emulate_ops *ops = ctxt->ops;
3905 int rc = X86EMUL_CONTINUE;
3906 int saved_dst_type = ctxt->dst.type;
3908 ctxt->mem_read.pos = 0;
3910 if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
3911 rc = emulate_ud(ctxt);
3915 /* LOCK prefix is allowed only with some instructions */
3916 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
3917 rc = emulate_ud(ctxt);
3921 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
3922 rc = emulate_ud(ctxt);
3927 && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
3928 || !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
3929 rc = emulate_ud(ctxt);
3933 if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
3934 rc = emulate_nm(ctxt);
3938 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3939 rc = emulator_check_intercept(ctxt, ctxt->intercept,
3940 X86_ICPT_PRE_EXCEPT);
3941 if (rc != X86EMUL_CONTINUE)
3945 /* Privileged instructions can be executed only at CPL 0 */
3946 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
3947 rc = emulate_gp(ctxt, 0);
3951 /* Instruction can only be executed in protected mode */
3952 if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
3953 rc = emulate_ud(ctxt);
3957 /* Do instruction specific permission checks */
3958 if (ctxt->check_perm) {
3959 rc = ctxt->check_perm(ctxt);
3960 if (rc != X86EMUL_CONTINUE)
3964 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3965 rc = emulator_check_intercept(ctxt, ctxt->intercept,
3966 X86_ICPT_POST_EXCEPT);
3967 if (rc != X86EMUL_CONTINUE)
3971 if (ctxt->rep_prefix && (ctxt->d & String)) {
3972 /* All REP prefixes have the same first termination condition */
3973 if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
3974 ctxt->eip = ctxt->_eip;
3979 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
3980 rc = segmented_read(ctxt, ctxt->src.addr.mem,
3981 ctxt->src.valptr, ctxt->src.bytes);
3982 if (rc != X86EMUL_CONTINUE)
3984 ctxt->src.orig_val64 = ctxt->src.val64;
3987 if (ctxt->src2.type == OP_MEM) {
3988 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
3989 &ctxt->src2.val, ctxt->src2.bytes);
3990 if (rc != X86EMUL_CONTINUE)
3994 if ((ctxt->d & DstMask) == ImplicitOps)
3998 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
3999 /* optimisation - avoid slow emulated read if Mov */
4000 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4001 &ctxt->dst.val, ctxt->dst.bytes);
4002 if (rc != X86EMUL_CONTINUE)
4005 ctxt->dst.orig_val = ctxt->dst.val;
4009 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
4010 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4011 X86_ICPT_POST_MEMACCESS);
4012 if (rc != X86EMUL_CONTINUE)
4016 if (ctxt->execute) {
4017 rc = ctxt->execute(ctxt);
4018 if (rc != X86EMUL_CONTINUE)
4027 case 0x40 ... 0x47: /* inc r16/r32 */
4028 emulate_1op(ctxt, "inc");
4030 case 0x48 ... 0x4f: /* dec r16/r32 */
4031 emulate_1op(ctxt, "dec");
4033 case 0x63: /* movsxd */
4034 if (ctxt->mode != X86EMUL_MODE_PROT64)
4035 goto cannot_emulate;
4036 ctxt->dst.val = (s32) ctxt->src.val;
4038 case 0x6c: /* insb */
4039 case 0x6d: /* insw/insd */
4040 ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
4042 case 0x6e: /* outsb */
4043 case 0x6f: /* outsw/outsd */
4044 ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
4047 case 0x70 ... 0x7f: /* jcc (short) */
4048 if (test_cc(ctxt->b, ctxt->eflags))
4049 rc = jmp_rel(ctxt, ctxt->src.val);
4051 case 0x8d: /* lea r16/r32, m */
4052 ctxt->dst.val = ctxt->src.addr.mem.ea;
4054 case 0x8f: /* pop (sole member of Grp1a) */
4055 rc = em_grp1a(ctxt);
4057 case 0x90 ... 0x97: /* nop / xchg reg, rax */
4058 if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
4062 case 0x98: /* cbw/cwde/cdqe */
4063 switch (ctxt->op_bytes) {
4064 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4065 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4066 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
4072 case 0xcc: /* int3 */
4073 rc = emulate_int(ctxt, 3);
4075 case 0xcd: /* int n */
4076 rc = emulate_int(ctxt, ctxt->src.val);
4078 case 0xce: /* into */
4079 if (ctxt->eflags & EFLG_OF)
4080 rc = emulate_int(ctxt, 4);
4082 case 0xd0 ... 0xd1: /* Grp2 */
4085 case 0xd2 ... 0xd3: /* Grp2 */
4086 ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
4089 case 0xe4: /* inb */
4092 case 0xe6: /* outb */
4093 case 0xe7: /* out */
4095 case 0xe9: /* jmp rel */
4096 case 0xeb: /* jmp rel short */
4097 rc = jmp_rel(ctxt, ctxt->src.val);
4098 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4100 case 0xec: /* in al,dx */
4101 case 0xed: /* in (e/r)ax,dx */
4103 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
4105 goto done; /* IO is needed */
4107 case 0xee: /* out dx,al */
4108 case 0xef: /* out dx,(e/r)ax */
4110 ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
4112 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4114 case 0xf4: /* hlt */
4115 ctxt->ops->halt(ctxt);
4117 case 0xf5: /* cmc */
4118 /* complement carry flag from eflags reg */
4119 ctxt->eflags ^= EFLG_CF;
4121 case 0xf8: /* clc */
4122 ctxt->eflags &= ~EFLG_CF;
4124 case 0xf9: /* stc */
4125 ctxt->eflags |= EFLG_CF;
4127 case 0xfc: /* cld */
4128 ctxt->eflags &= ~EFLG_DF;
4130 case 0xfd: /* std */
4131 ctxt->eflags |= EFLG_DF;
4133 case 0xfe: /* Grp4 */
4134 rc = em_grp45(ctxt);
4136 case 0xff: /* Grp5 */
4137 rc = em_grp45(ctxt);
4140 goto cannot_emulate;
4143 if (rc != X86EMUL_CONTINUE)
4147 rc = writeback(ctxt);
4148 if (rc != X86EMUL_CONTINUE)
4152 * Restore dst type in case the decoding is reused
4153 * (happens for string instructions).
4155 ctxt->dst.type = saved_dst_type;
4157 if ((ctxt->d & SrcMask) == SrcSI)
4158 string_addr_inc(ctxt, seg_override(ctxt),
4159 VCPU_REGS_RSI, &ctxt->src);
4161 if ((ctxt->d & DstMask) == DstDI)
4162 string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
4165 if (ctxt->rep_prefix && (ctxt->d & String)) {
4166 struct read_cache *r = &ctxt->io_read;
4167 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
4169 if (!string_insn_completed(ctxt)) {
4171 * Re-enter the guest when the PIO read-ahead buffer is empty
4172 * or, if it is not used, after every 1024 iterations.
4174 if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
4175 (r->end == 0 || r->end != r->pos)) {
4177 * Reset read cache. Usually happens before
4178 * decode, but since the instruction is restarted
4179 * we have to do it here.
4181 ctxt->mem_read.end = 0;
4182 return EMULATION_RESTART;
4184 goto done; /* skip rip writeback */
4188 ctxt->eip = ctxt->_eip;
4191 if (rc == X86EMUL_PROPAGATE_FAULT)
4192 ctxt->have_exception = true;
4193 if (rc == X86EMUL_INTERCEPTED)
4194 return EMULATION_INTERCEPTED;
4196 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
4200 case 0x09: /* wbinvd */
4201 (ctxt->ops->wbinvd)(ctxt);
4203 case 0x08: /* invd */
4204 case 0x0d: /* GrpP (prefetch) */
4205 case 0x18: /* Grp16 (prefetch/nop) */
4207 case 0x20: /* mov cr, reg */
4208 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
4210 case 0x21: /* mov from dr to reg */
4211 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
4213 case 0x22: /* mov reg, cr */
4214 if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
4215 emulate_gp(ctxt, 0);
4216 rc = X86EMUL_PROPAGATE_FAULT;
4219 ctxt->dst.type = OP_NONE;
4221 case 0x23: /* mov from reg to dr */
4222 if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
4223 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
4224 ~0ULL : ~0U)) < 0) {
4225 /* #UD condition is already handled by the code above */
4226 emulate_gp(ctxt, 0);
4227 rc = X86EMUL_PROPAGATE_FAULT;
4231 ctxt->dst.type = OP_NONE; /* no writeback */
4235 msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
4236 | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
4237 if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
4238 emulate_gp(ctxt, 0);
4239 rc = X86EMUL_PROPAGATE_FAULT;
4242 rc = X86EMUL_CONTINUE;
4246 if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
4247 emulate_gp(ctxt, 0);
4248 rc = X86EMUL_PROPAGATE_FAULT;
4251 ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
4252 ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
4254 rc = X86EMUL_CONTINUE;
4256 case 0x40 ... 0x4f: /* cmov */
4257 ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
4258 if (!test_cc(ctxt->b, ctxt->eflags))
4259 ctxt->dst.type = OP_NONE; /* no writeback */
4261 case 0x80 ... 0x8f: /* jcc rel, etc. */
4262 if (test_cc(ctxt->b, ctxt->eflags))
4263 rc = jmp_rel(ctxt, ctxt->src.val);
4265 case 0x90 ... 0x9f: /* setcc r/m8 */
4266 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
4270 ctxt->dst.type = OP_NONE;
4271 /* only subword offset */
4272 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
4273 emulate_2op_SrcV_nobyte(ctxt, "bt");
4275 case 0xa4: /* shld imm8, r, r/m */
4276 case 0xa5: /* shld cl, r, r/m */
4277 emulate_2op_cl(ctxt, "shld");
4281 emulate_2op_SrcV_nobyte(ctxt, "bts");
4283 case 0xac: /* shrd imm8, r, r/m */
4284 case 0xad: /* shrd cl, r, r/m */
4285 emulate_2op_cl(ctxt, "shrd");
4287 case 0xae: /* clflush */
4289 case 0xb0 ... 0xb1: /* cmpxchg */
4291 * Save the real source value, then compare EAX against the destination. */
4294 ctxt->src.orig_val = ctxt->src.val;
4295 ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
4296 emulate_2op_SrcV(ctxt, "cmp");
4297 if (ctxt->eflags & EFLG_ZF) {
4298 /* Success: write back to memory. */
4299 ctxt->dst.val = ctxt->src.orig_val;
4301 /* Failure: write the value we saw to EAX. */
4302 ctxt->dst.type = OP_REG;
4303 ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
4308 emulate_2op_SrcV_nobyte(ctxt, "btr");
4310 case 0xb6 ... 0xb7: /* movzx */
4311 ctxt->dst.bytes = ctxt->op_bytes;
4312 ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
4313 : (u16) ctxt->src.val;
4315 case 0xba: /* Grp8 */
4316 switch (ctxt->modrm_reg & 3) {
4329 emulate_2op_SrcV_nobyte(ctxt, "btc");
4331 case 0xbc: { /* bsf */
4333 __asm__ ("bsf %2, %0; setz %1"
4334 : "=r"(ctxt->dst.val), "=q"(zf)
4335 : "r"(ctxt->src.val));
4336 ctxt->eflags &= ~X86_EFLAGS_ZF;
4338 ctxt->eflags |= X86_EFLAGS_ZF;
4339 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4343 case 0xbd: { /* bsr */
4345 __asm__ ("bsr %2, %0; setz %1"
4346 : "=r"(ctxt->dst.val), "=q"(zf)
4347 : "r"(ctxt->src.val));
4348 ctxt->eflags &= ~X86_EFLAGS_ZF;
4350 ctxt->eflags |= X86_EFLAGS_ZF;
4351 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4355 case 0xbe ... 0xbf: /* movsx */
4356 ctxt->dst.bytes = ctxt->op_bytes;
4357 ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
4358 (s16) ctxt->src.val;
4360 case 0xc0 ... 0xc1: /* xadd */
4361 emulate_2op_SrcV(ctxt, "add");
4362 /* Write back the register source. */
4363 ctxt->src.val = ctxt->dst.orig_val;
4364 write_register_operand(&ctxt->src);
4366 case 0xc3: /* movnti */
4367 ctxt->dst.bytes = ctxt->op_bytes;
4368 ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
4369 (u64) ctxt->src.val;
4371 case 0xc7: /* Grp9 (cmpxchg8b) */
4375 goto cannot_emulate;
4378 if (rc != X86EMUL_CONTINUE)
4384 return EMULATION_FAILED;