1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
35 #define OpImplicit 1ull /* No generic decode */
36 #define OpReg 2ull /* Register */
37 #define OpMem 3ull /* Memory */
38 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
39 #define OpDI 5ull /* ES:DI/EDI/RDI */
40 #define OpMem64 6ull /* Memory, 64-bit */
41 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
42 #define OpDX 8ull /* DX register */
43 #define OpCL 9ull /* CL register (for shifts) */
44 #define OpImmByte 10ull /* 8-bit sign extended immediate */
45 #define OpOne 11ull /* Implied 1 */
46 #define OpImm 12ull /* Sign extended immediate */
47 #define OpMem16 13ull /* Memory operand (16-bit). */
48 #define OpMem32 14ull /* Memory operand (32-bit). */
49 #define OpImmU 15ull /* Immediate operand, zero extended */
50 #define OpSI 16ull /* SI/ESI/RSI */
51 #define OpImmFAddr 17ull /* Immediate far address */
52 #define OpMemFAddr 18ull /* Far address in memory */
53 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
54 #define OpES 20ull /* ES */
55 #define OpCS 21ull /* CS */
56 #define OpSS 22ull /* SS */
57 #define OpDS 23ull /* DS */
58 #define OpFS 24ull /* FS */
59 #define OpGS 25ull /* GS */
61 #define OpBits 5 /* Width of operand field */
62 #define OpMask ((1ull << OpBits) - 1)
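/*
 * A compiled-out sketch of how these packed operand fields are meant to
 * be consumed: each operand type sits in an OpBits-wide slot of the
 * 64-bit opcode flags, so extraction is a shift plus a mask (DstShift
 * and SrcShift are defined below). The helper names are made up for
 * illustration and are not used by the emulator.
 */
#if 0
static inline u64 example_dst_op(u64 flags)
{
	return (flags >> DstShift) & OpMask;	/* e.g. DstReg yields OpReg */
}

static inline u64 example_src_op(u64 flags)
{
	return (flags >> SrcShift) & OpMask;	/* e.g. SrcImm yields OpImm */
}
#endif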
65 * Opcode effective-address decode tables.
66 * Note that we only emulate instructions that have at least one memory
67 * operand (excluding implicit stack references). We assume that stack
68 * references and instruction fetches will never occur in special memory
69 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need never be emulated.
73 /* Operand sizes: 8-bit operands or specified/overridden size. */
74 #define ByteOp (1<<0) /* 8-bit operands. */
75 /* Destination operand type. */
77 #define ImplicitOps (OpImplicit << DstShift)
78 #define DstReg (OpReg << DstShift)
79 #define DstMem (OpMem << DstShift)
80 #define DstAcc (OpAcc << DstShift)
81 #define DstDI (OpDI << DstShift)
82 #define DstMem64 (OpMem64 << DstShift)
83 #define DstImmUByte (OpImmUByte << DstShift)
84 #define DstDX (OpDX << DstShift)
85 #define DstMask (OpMask << DstShift)
86 /* Source operand type. */
88 #define SrcNone (OpNone << SrcShift)
89 #define SrcReg (OpReg << SrcShift)
90 #define SrcMem (OpMem << SrcShift)
91 #define SrcMem16 (OpMem16 << SrcShift)
92 #define SrcMem32 (OpMem32 << SrcShift)
93 #define SrcImm (OpImm << SrcShift)
94 #define SrcImmByte (OpImmByte << SrcShift)
95 #define SrcOne (OpOne << SrcShift)
96 #define SrcImmUByte (OpImmUByte << SrcShift)
97 #define SrcImmU (OpImmU << SrcShift)
98 #define SrcSI (OpSI << SrcShift)
99 #define SrcImmFAddr (OpImmFAddr << SrcShift)
100 #define SrcMemFAddr (OpMemFAddr << SrcShift)
101 #define SrcAcc (OpAcc << SrcShift)
102 #define SrcImmU16 (OpImmU16 << SrcShift)
103 #define SrcDX (OpDX << SrcShift)
104 #define SrcMask (OpMask << SrcShift)
105 #define BitOp (1<<11)
106 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
107 #define String (1<<13) /* String instruction (rep capable) */
108 #define Stack (1<<14) /* Stack instruction (push/pop) */
109 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
110 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
111 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
112 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
113 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
114 #define Sse (1<<18) /* SSE Vector instruction */
115 /* Generic ModRM decode. */
116 #define ModRM (1<<19)
117 /* Destination is only written; never read. */
120 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
121 #define VendorSpecific (1<<22) /* Vendor specific instruction */
122 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
123 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
124 #define Undefined (1<<25) /* No Such Instruction */
125 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
126 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
128 /* Source 2 operand type */
129 #define Src2Shift (29)
130 #define Src2None (OpNone << Src2Shift)
131 #define Src2CL (OpCL << Src2Shift)
132 #define Src2ImmByte (OpImmByte << Src2Shift)
133 #define Src2One (OpOne << Src2Shift)
134 #define Src2Imm (OpImm << Src2Shift)
135 #define Src2ES (OpES << Src2Shift)
136 #define Src2CS (OpCS << Src2Shift)
137 #define Src2SS (OpSS << Src2Shift)
138 #define Src2DS (OpDS << Src2Shift)
139 #define Src2FS (OpFS << Src2Shift)
140 #define Src2GS (OpGS << Src2Shift)
141 #define Src2Mask (OpMask << Src2Shift)
143 #define X2(x...) x, x
144 #define X3(x...) X2(x), x
145 #define X4(x...) X2(x), X2(x)
146 #define X5(x...) X4(x), x
147 #define X6(x...) X4(x), X2(x)
148 #define X7(x...) X4(x), X3(x)
149 #define X8(x...) X4(x), X4(x)
150 #define X16(x...) X8(x), X8(x)
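/*
 * The X* macros simply repeat their argument list, which keeps the big
 * opcode tables compact. For example, X4(ByteOp | DstReg) expands (via
 * two X2 steps) to four identical table entries:
 *
 *	ByteOp | DstReg, ByteOp | DstReg, ByteOp | DstReg, ByteOp | DstReg
 */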
156 int (*execute)(struct x86_emulate_ctxt *ctxt);
157 struct opcode *group;
158 struct group_dual *gdual;
159 struct gprefix *gprefix;
161 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
165 struct opcode mod012[8];
166 struct opcode mod3[8];
170 struct opcode pfx_no;
171 struct opcode pfx_66;
172 struct opcode pfx_f2;
173 struct opcode pfx_f3;
176 /* EFLAGS bit definitions. */
177 #define EFLG_ID (1<<21)
178 #define EFLG_VIP (1<<20)
179 #define EFLG_VIF (1<<19)
180 #define EFLG_AC (1<<18)
181 #define EFLG_VM (1<<17)
182 #define EFLG_RF (1<<16)
183 #define EFLG_IOPL (3<<12)
184 #define EFLG_NT (1<<14)
185 #define EFLG_OF (1<<11)
186 #define EFLG_DF (1<<10)
187 #define EFLG_IF (1<<9)
188 #define EFLG_TF (1<<8)
189 #define EFLG_SF (1<<7)
190 #define EFLG_ZF (1<<6)
191 #define EFLG_AF (1<<4)
192 #define EFLG_PF (1<<2)
193 #define EFLG_CF (1<<0)
195 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
196 #define EFLG_RESERVED_ONE_MASK 2
199 * Instruction emulation:
200 * Most instructions are emulated directly via a fragment of inline assembly
201 * code. This allows us to save/restore EFLAGS and thus very easily pick up
202 * any modified flags.
205 #if defined(CONFIG_X86_64)
206 #define _LO32 "k" /* force 32-bit operand */
207 #define _STK "%%rsp" /* stack pointer */
208 #elif defined(__i386__)
209 #define _LO32 "" /* force 32-bit operand */
210 #define _STK "%%esp" /* stack pointer */
214 * These EFLAGS bits are restored from saved value during emulation, and
215 * any changes are written back to the saved value after emulation.
217 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
219 /* Before executing instruction: restore necessary bits in EFLAGS. */
220 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
221 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
222 "movl %"_sav",%"_LO32 _tmp"; " \
225 "movl %"_msk",%"_LO32 _tmp"; " \
226 "andl %"_LO32 _tmp",("_STK"); " \
228 "notl %"_LO32 _tmp"; " \
229 "andl %"_LO32 _tmp",("_STK"); " \
230 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
232 "orl %"_LO32 _tmp",("_STK"); " \
236 /* After executing instruction: write-back necessary bits in EFLAGS. */
237 #define _POST_EFLAGS(_sav, _msk, _tmp) \
238 /* _sav |= EFLAGS & _msk; */ \
241 "andl %"_msk",%"_LO32 _tmp"; " \
242 "orl %"_LO32 _tmp",%"_sav"; "
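/*
 * A compiled-out sketch of the pattern used by the __emulate_* macros
 * below: bracket a single instruction with _PRE_EFLAGS/_POST_EFLAGS so
 * that the saved arithmetic flags are injected beforehand and the
 * modified flags harvested afterwards. example_emulated_add() is an
 * illustration only, not part of the emulator.
 */
#if 0
static unsigned long example_emulated_add(unsigned long *eflags,
					  unsigned long dst,
					  unsigned long src)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		_PRE_EFLAGS("0", "3", "2")
		"add %4,%1; "
		_POST_EFLAGS("0", "3", "2")
		: "=m" (*eflags), "+r" (dst), "=&r" (tmp)
		: "i" (EFLAGS_MASK), "r" (src));
	return dst;	/* *eflags now carries the OF/SF/ZF/AF/PF/CF results */
}
#endif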
250 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
252 __asm__ __volatile__ ( \
253 _PRE_EFLAGS("0", "4", "2") \
254 _op _suffix " %"_x"3,%1; " \
255 _POST_EFLAGS("0", "4", "2") \
256 : "=m" ((ctxt)->eflags), \
257 "+q" (*(_dsttype*)&(ctxt)->dst.val), \
259 : _y ((ctxt)->src.val), "i" (EFLAGS_MASK)); \
263 /* Raw emulation: instruction has two explicit operands. */
264 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
266 unsigned long _tmp; \
268 switch ((ctxt)->dst.bytes) { \
270 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
273 ____emulate_2op(ctxt,_op,_lx,_ly,"l",u32); \
276 ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
281 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
283 unsigned long _tmp; \
284 switch ((ctxt)->dst.bytes) { \
286 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
289 __emulate_2op_nobyte(ctxt, _op, \
290 _wx, _wy, _lx, _ly, _qx, _qy); \
295 /* Source operand is byte-sized and may be restricted to just %cl. */
296 #define emulate_2op_SrcB(ctxt, _op) \
297 __emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")
299 /* Source operand is byte, word, long or quad sized. */
300 #define emulate_2op_SrcV(ctxt, _op) \
301 __emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")
303 /* Source operand is word, long or quad sized. */
304 #define emulate_2op_SrcV_nobyte(ctxt, _op) \
305 __emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
307 /* Instruction has three operands and one operand is stored in the ECX register */
308 #define __emulate_2op_cl(ctxt, _op, _suffix, _type) \
310 unsigned long _tmp; \
311 _type _clv = (ctxt)->src2.val; \
312 _type _srcv = (ctxt)->src.val; \
313 _type _dstv = (ctxt)->dst.val; \
315 __asm__ __volatile__ ( \
316 _PRE_EFLAGS("0", "5", "2") \
317 _op _suffix " %4,%1 \n" \
318 _POST_EFLAGS("0", "5", "2") \
319 : "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
320 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
323 (ctxt)->src2.val = (unsigned long) _clv; \
324 (ctxt)->src.val = (unsigned long) _srcv; \
325 (ctxt)->dst.val = (unsigned long) _dstv; \
328 #define emulate_2op_cl(ctxt, _op) \
330 switch ((ctxt)->dst.bytes) { \
332 __emulate_2op_cl(ctxt, _op, "w", u16); \
335 __emulate_2op_cl(ctxt, _op, "l", u32); \
338 ON64(__emulate_2op_cl(ctxt, _op, "q", ulong)); \
343 #define __emulate_1op(ctxt, _op, _suffix) \
345 unsigned long _tmp; \
347 __asm__ __volatile__ ( \
348 _PRE_EFLAGS("0", "3", "2") \
349 _op _suffix " %1; " \
350 _POST_EFLAGS("0", "3", "2") \
351 : "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
353 : "i" (EFLAGS_MASK)); \
356 /* Instruction has only one explicit operand (no source operand). */
357 #define emulate_1op(ctxt, _op) \
359 switch ((ctxt)->dst.bytes) { \
360 case 1: __emulate_1op(ctxt, _op, "b"); break; \
361 case 2: __emulate_1op(ctxt, _op, "w"); break; \
362 case 4: __emulate_1op(ctxt, _op, "l"); break; \
363 case 8: ON64(__emulate_1op(ctxt, _op, "q")); break; \
367 #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
369 unsigned long _tmp; \
370 ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX]; \
371 ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX]; \
373 __asm__ __volatile__ ( \
374 _PRE_EFLAGS("0", "5", "1") \
376 _op _suffix " %6; " \
378 _POST_EFLAGS("0", "5", "1") \
379 ".pushsection .fixup,\"ax\" \n\t" \
380 "3: movb $1, %4 \n\t" \
383 _ASM_EXTABLE(1b, 3b) \
384 : "=m" ((ctxt)->eflags), "=&r" (_tmp), \
385 "+a" (*rax), "+d" (*rdx), "+qm"(_ex) \
386 : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val), \
387 "a" (*rax), "d" (*rdx)); \
390 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
391 #define emulate_1op_rax_rdx(ctxt, _op, _ex) \
393 switch((ctxt)->src.bytes) { \
395 __emulate_1op_rax_rdx(ctxt, _op, "b", _ex); \
398 __emulate_1op_rax_rdx(ctxt, _op, "w", _ex); \
401 __emulate_1op_rax_rdx(ctxt, _op, "l", _ex); \
404 __emulate_1op_rax_rdx(ctxt, _op, "q", _ex)); \
409 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
410 enum x86_intercept intercept,
411 enum x86_intercept_stage stage)
413 struct x86_instruction_info info = {
414 .intercept = intercept,
415 .rep_prefix = ctxt->rep_prefix,
416 .modrm_mod = ctxt->modrm_mod,
417 .modrm_reg = ctxt->modrm_reg,
418 .modrm_rm = ctxt->modrm_rm,
419 .src_val = ctxt->src.val64,
420 .src_bytes = ctxt->src.bytes,
421 .dst_bytes = ctxt->dst.bytes,
422 .ad_bytes = ctxt->ad_bytes,
423 .next_rip = ctxt->eip,
426 return ctxt->ops->intercept(ctxt, &info, stage);
429 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
431 return (1UL << (ctxt->ad_bytes << 3)) - 1;
434 /* Access/update address held in a register, based on addressing mode. */
435 static inline unsigned long
436 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
438 if (ctxt->ad_bytes == sizeof(unsigned long))
441 return reg & ad_mask(ctxt);
444 static inline unsigned long
445 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
447 return address_mask(ctxt, reg);
451 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
453 if (ctxt->ad_bytes == sizeof(unsigned long))
456 *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
459 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
461 register_address_increment(ctxt, &ctxt->_eip, rel);
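/*
 * Worked example of the masking above: with ctxt->ad_bytes == 2,
 * register_address_increment() applied to a register holding 0x1234ffff
 * with inc == 1 yields 0x12340000 - the 16-bit address wraps around
 * while the untouched high bits of the register are preserved, matching
 * real-mode semantics.
 */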
464 static u32 desc_limit_scaled(struct desc_struct *desc)
466 u32 limit = get_desc_limit(desc);
468 return desc->g ? (limit << 12) | 0xfff : limit;
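/*
 * Example: with g == 1 and a raw 20-bit limit of 0xfffff (as used for
 * the flat syscall segments set up later in this file), the scaled
 * limit is (0xfffff << 12) | 0xfff == 0xffffffff, i.e. 4GB - 1.
 */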
471 static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
473 ctxt->has_seg_override = true;
474 ctxt->seg_override = seg;
477 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
479 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
482 return ctxt->ops->get_cached_segment_base(ctxt, seg);
485 static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
487 if (!ctxt->has_seg_override)
490 return ctxt->seg_override;
493 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
494 u32 error, bool valid)
496 ctxt->exception.vector = vec;
497 ctxt->exception.error_code = error;
498 ctxt->exception.error_code_valid = valid;
499 return X86EMUL_PROPAGATE_FAULT;
502 static int emulate_db(struct x86_emulate_ctxt *ctxt)
504 return emulate_exception(ctxt, DB_VECTOR, 0, false);
507 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
509 return emulate_exception(ctxt, GP_VECTOR, err, true);
512 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
514 return emulate_exception(ctxt, SS_VECTOR, err, true);
517 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
519 return emulate_exception(ctxt, UD_VECTOR, 0, false);
522 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
524 return emulate_exception(ctxt, TS_VECTOR, err, true);
527 static int emulate_de(struct x86_emulate_ctxt *ctxt)
529 return emulate_exception(ctxt, DE_VECTOR, 0, false);
532 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
534 return emulate_exception(ctxt, NM_VECTOR, 0, false);
537 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
540 struct desc_struct desc;
542 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
546 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
551 struct desc_struct desc;
553 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
554 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
557 static int __linearize(struct x86_emulate_ctxt *ctxt,
558 struct segmented_address addr,
559 unsigned size, bool write, bool fetch,
562 struct desc_struct desc;
569 la = seg_base(ctxt, addr.seg) + addr.ea;
570 switch (ctxt->mode) {
571 case X86EMUL_MODE_REAL:
573 case X86EMUL_MODE_PROT64:
574 if (((signed long)la << 16) >> 16 != la)
575 return emulate_gp(ctxt, 0);
578 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
582 /* code segment or read-only data segment */
583 if (((desc.type & 8) || !(desc.type & 2)) && write)
585 /* unreadable code segment */
586 if (!fetch && (desc.type & 8) && !(desc.type & 2))
588 lim = desc_limit_scaled(&desc);
589 if ((desc.type & 8) || !(desc.type & 4)) {
590 /* expand-up segment */
591 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
594 /* expand-down segment */
595 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
597 lim = desc.d ? 0xffffffff : 0xffff;
598 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
601 cpl = ctxt->ops->cpl(ctxt);
604 if (!(desc.type & 8)) {
608 } else if ((desc.type & 8) && !(desc.type & 4)) {
609 /* nonconforming code segment */
612 } else if ((desc.type & 8) && (desc.type & 4)) {
613 /* conforming code segment */
619 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
622 return X86EMUL_CONTINUE;
624 if (addr.seg == VCPU_SREG_SS)
625 return emulate_ss(ctxt, addr.seg);
627 return emulate_gp(ctxt, addr.seg);
630 static int linearize(struct x86_emulate_ctxt *ctxt,
631 struct segmented_address addr,
632 unsigned size, bool write,
635 return __linearize(ctxt, addr, size, write, false, linear);
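/*
 * Example: a 2-byte read at DS:0x1000 with a DS base of 0x20000
 * linearizes to 0x21000; __linearize() checks the effective addresses
 * 0x1000 and 0x1001 against the scaled DS limit (and the access rights)
 * before the access is allowed through.
 */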
639 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
640 struct segmented_address addr,
647 rc = linearize(ctxt, addr, size, false, &linear);
648 if (rc != X86EMUL_CONTINUE)
650 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
654 * Fetch the next byte of the instruction being emulated, which is pointed to
655 * by ctxt->_eip, then increment ctxt->_eip.
657 * Also prefetch the remaining bytes of the instruction without crossing a
658 * page boundary if they are not yet in fetch_cache.
660 static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
662 struct fetch_cache *fc = &ctxt->fetch;
666 if (ctxt->_eip == fc->end) {
667 unsigned long linear;
668 struct segmented_address addr = { .seg = VCPU_SREG_CS,
670 cur_size = fc->end - fc->start;
671 size = min(15UL - cur_size,
672 PAGE_SIZE - offset_in_page(ctxt->_eip));
673 rc = __linearize(ctxt, addr, size, false, true, &linear);
674 if (unlikely(rc != X86EMUL_CONTINUE))
676 rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
677 size, &ctxt->exception);
678 if (unlikely(rc != X86EMUL_CONTINUE))
682 *dest = fc->data[ctxt->_eip - fc->start];
684 return X86EMUL_CONTINUE;
687 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
688 void *dest, unsigned size)
692 /* x86 instructions are limited to 15 bytes. */
693 if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
694 return X86EMUL_UNHANDLEABLE;
696 rc = do_insn_fetch_byte(ctxt, dest++);
697 if (rc != X86EMUL_CONTINUE)
700 return X86EMUL_CONTINUE;
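/*
 * Example: if decoding has already consumed 14 bytes (ctxt->_eip -
 * ctxt->eip == 14), a further 2-byte fetch makes the expression above
 * 16 > 15 and the insn is rejected as unhandleable, enforcing the
 * architectural 15-byte instruction length limit.
 */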
703 /* Fetch next part of the instruction being emulated. */
704 #define insn_fetch(_type, _ctxt) \
705 ({ unsigned long _x; \
706 rc = do_insn_fetch(_ctxt, &_x, sizeof(_type)); \
707 if (rc != X86EMUL_CONTINUE) \
712 #define insn_fetch_arr(_arr, _size, _ctxt) \
713 ({ rc = do_insn_fetch(_ctxt, _arr, (_size)); \
714 if (rc != X86EMUL_CONTINUE) \
719 * Given the 'reg' portion of a ModRM byte, and a register block, return a
720 * pointer into the block that addresses the relevant register.
721 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
723 static void *decode_register(u8 modrm_reg, unsigned long *regs,
728 p = &regs[modrm_reg];
729 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
730 p = (unsigned char *)&regs[modrm_reg & 3] + 1;
734 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
735 struct segmented_address addr,
736 u16 *size, unsigned long *address, int op_bytes)
743 rc = segmented_read_std(ctxt, addr, size, 2);
744 if (rc != X86EMUL_CONTINUE)
747 rc = segmented_read_std(ctxt, addr, address, op_bytes);
751 static int test_cc(unsigned int condition, unsigned int flags)
755 switch ((condition & 15) >> 1) {
757 rc |= (flags & EFLG_OF);
759 case 1: /* b/c/nae */
760 rc |= (flags & EFLG_CF);
763 rc |= (flags & EFLG_ZF);
766 rc |= (flags & (EFLG_CF|EFLG_ZF));
769 rc |= (flags & EFLG_SF);
772 rc |= (flags & EFLG_PF);
775 rc |= (flags & EFLG_ZF);
778 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
782 /* Odd condition identifiers (lsb == 1) have inverted sense. */
783 return (!!rc ^ (condition & 1));
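/*
 * Example: for JNE (opcode 0x75) the condition is the low nibble 5;
 * (5 & 15) >> 1 == 2 selects the EFLG_ZF case, and the set low bit
 * inverts the sense, so test_cc() reports "taken" exactly when ZF == 0.
 */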
786 static void fetch_register_operand(struct operand *op)
790 op->val = *(u8 *)op->addr.reg;
793 op->val = *(u16 *)op->addr.reg;
796 op->val = *(u32 *)op->addr.reg;
799 op->val = *(u64 *)op->addr.reg;
804 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
806 ctxt->ops->get_fpu(ctxt);
808 case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
809 case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
810 case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
811 case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
812 case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
813 case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
814 case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
815 case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
817 case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
818 case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
819 case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
820 case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
821 case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
822 case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
823 case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
824 case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
828 ctxt->ops->put_fpu(ctxt);
831 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
834 ctxt->ops->get_fpu(ctxt);
836 case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
837 case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
838 case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
839 case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
840 case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
841 case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
842 case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
843 case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
845 case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
846 case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
847 case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
848 case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
849 case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
850 case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
851 case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
852 case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
856 ctxt->ops->put_fpu(ctxt);
859 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
863 unsigned reg = ctxt->modrm_reg;
864 int highbyte_regs = ctxt->rex_prefix == 0;
866 if (!(ctxt->d & ModRM))
867 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
873 read_sse_reg(ctxt, &op->vec_val, reg);
878 if ((ctxt->d & ByteOp) && !inhibit_bytereg) {
879 op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
882 op->addr.reg = decode_register(reg, ctxt->regs, 0);
883 op->bytes = ctxt->op_bytes;
885 fetch_register_operand(op);
886 op->orig_val = op->val;
889 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
893 int index_reg = 0, base_reg = 0, scale;
894 int rc = X86EMUL_CONTINUE;
897 if (ctxt->rex_prefix) {
898 ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1; /* REX.R */
899 index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
900 ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
903 ctxt->modrm = insn_fetch(u8, ctxt);
904 ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
905 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
906 ctxt->modrm_rm |= (ctxt->modrm & 0x07);
907 ctxt->modrm_seg = VCPU_SREG_DS;
909 if (ctxt->modrm_mod == 3) {
911 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
912 op->addr.reg = decode_register(ctxt->modrm_rm,
913 ctxt->regs, ctxt->d & ByteOp);
917 op->addr.xmm = ctxt->modrm_rm;
918 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
921 fetch_register_operand(op);
927 if (ctxt->ad_bytes == 2) {
928 unsigned bx = ctxt->regs[VCPU_REGS_RBX];
929 unsigned bp = ctxt->regs[VCPU_REGS_RBP];
930 unsigned si = ctxt->regs[VCPU_REGS_RSI];
931 unsigned di = ctxt->regs[VCPU_REGS_RDI];
933 /* 16-bit ModR/M decode. */
934 switch (ctxt->modrm_mod) {
936 if (ctxt->modrm_rm == 6)
937 modrm_ea += insn_fetch(u16, ctxt);
940 modrm_ea += insn_fetch(s8, ctxt);
943 modrm_ea += insn_fetch(u16, ctxt);
946 switch (ctxt->modrm_rm) {
966 if (ctxt->modrm_mod != 0)
973 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
974 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
975 ctxt->modrm_seg = VCPU_SREG_SS;
976 modrm_ea = (u16)modrm_ea;
978 /* 32/64-bit ModR/M decode. */
979 if ((ctxt->modrm_rm & 7) == 4) {
980 sib = insn_fetch(u8, ctxt);
981 index_reg |= (sib >> 3) & 7;
985 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
986 modrm_ea += insn_fetch(s32, ctxt);
988 modrm_ea += ctxt->regs[base_reg];
990 modrm_ea += ctxt->regs[index_reg] << scale;
991 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
992 if (ctxt->mode == X86EMUL_MODE_PROT64)
993 ctxt->rip_relative = 1;
995 modrm_ea += ctxt->regs[ctxt->modrm_rm];
996 switch (ctxt->modrm_mod) {
998 if (ctxt->modrm_rm == 5)
999 modrm_ea += insn_fetch(s32, ctxt);
1002 modrm_ea += insn_fetch(s8, ctxt);
1005 modrm_ea += insn_fetch(s32, ctxt);
1009 op->addr.mem.ea = modrm_ea;
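/*
 * Worked example of the 32-bit path above: ModRM 0x04 decodes as
 * mod == 0, reg == 0, rm == 4, so a SIB byte follows; SIB 0x8b gives
 * scale == 2, index == 1 (RCX) and base == 3 (RBX), leaving
 * modrm_ea == RBX + RCX * 4.
 */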
1014 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1017 int rc = X86EMUL_CONTINUE;
1020 switch (ctxt->ad_bytes) {
1022 op->addr.mem.ea = insn_fetch(u16, ctxt);
1025 op->addr.mem.ea = insn_fetch(u32, ctxt);
1028 op->addr.mem.ea = insn_fetch(u64, ctxt);
1035 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1039 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1040 mask = ~(ctxt->dst.bytes * 8 - 1);
1042 if (ctxt->src.bytes == 2)
1043 sv = (s16)ctxt->src.val & (s16)mask;
1044 else if (ctxt->src.bytes == 4)
1045 sv = (s32)ctxt->src.val & (s32)mask;
1047 ctxt->dst.addr.mem.ea += (sv >> 3);
1050 /* only subword offset */
1051 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
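/*
 * Example of the adjustment above: "bt %cx, mem" with a bit offset of
 * 100 and a 2-byte destination masks the offset with ~15, giving
 * sv == 96; the effective address is advanced by 96 >> 3 == 12 bytes,
 * and the remaining offset 100 & 15 == 4 selects the bit within that
 * word.
 */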
1054 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1055 unsigned long addr, void *dest, unsigned size)
1058 struct read_cache *mc = &ctxt->mem_read;
1061 int n = min(size, 8u);
1063 if (mc->pos < mc->end)
1066 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
1068 if (rc != X86EMUL_CONTINUE)
1073 memcpy(dest, mc->data + mc->pos, n);
1078 return X86EMUL_CONTINUE;
1081 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1082 struct segmented_address addr,
1089 rc = linearize(ctxt, addr, size, false, &linear);
1090 if (rc != X86EMUL_CONTINUE)
1092 return read_emulated(ctxt, linear, data, size);
1095 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1096 struct segmented_address addr,
1103 rc = linearize(ctxt, addr, size, true, &linear);
1104 if (rc != X86EMUL_CONTINUE)
1106 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1110 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1111 struct segmented_address addr,
1112 const void *orig_data, const void *data,
1118 rc = linearize(ctxt, addr, size, true, &linear);
1119 if (rc != X86EMUL_CONTINUE)
1121 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1122 size, &ctxt->exception);
1125 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1126 unsigned int size, unsigned short port,
1129 struct read_cache *rc = &ctxt->io_read;
1131 if (rc->pos == rc->end) { /* refill pio read ahead */
1132 unsigned int in_page, n;
1133 unsigned int count = ctxt->rep_prefix ?
1134 address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
1135 in_page = (ctxt->eflags & EFLG_DF) ?
1136 offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
1137 PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
1138 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1142 rc->pos = rc->end = 0;
1143 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1148 memcpy(dest, rc->data + rc->pos, size);
1153 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1154 u16 selector, struct desc_ptr *dt)
1156 struct x86_emulate_ops *ops = ctxt->ops;
1158 if (selector & 1 << 2) {
1159 struct desc_struct desc;
1162 memset(dt, 0, sizeof *dt);
1163 if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
1166 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1167 dt->address = get_desc_base(&desc);
1169 ops->get_gdt(ctxt, dt);
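/*
 * Example: selector 0x2b has index == 5 (0x2b >> 3), TI == 1 (bit 2
 * set, so the lookup above goes through the LDT rather than the GDT)
 * and RPL == 3; the matching descriptor is then the 8-byte entry at
 * dt->address + 5 * 8.
 */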
1172 /* allowed just for 8-byte segments */
1173 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1174 u16 selector, struct desc_struct *desc)
1177 u16 index = selector >> 3;
1180 get_descriptor_table_ptr(ctxt, selector, &dt);
1182 if (dt.size < index * 8 + 7)
1183 return emulate_gp(ctxt, selector & 0xfffc);
1185 addr = dt.address + index * 8;
1186 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1190 /* allowed just for 8-byte segments */
1191 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1192 u16 selector, struct desc_struct *desc)
1195 u16 index = selector >> 3;
1198 get_descriptor_table_ptr(ctxt, selector, &dt);
1200 if (dt.size < index * 8 + 7)
1201 return emulate_gp(ctxt, selector & 0xfffc);
1203 addr = dt.address + index * 8;
1204 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1208 /* Does not support long mode */
1209 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1210 u16 selector, int seg)
1212 struct desc_struct seg_desc;
1214 unsigned err_vec = GP_VECTOR;
1216 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1219 memset(&seg_desc, 0, sizeof seg_desc);
1221 if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1222 || ctxt->mode == X86EMUL_MODE_REAL) {
1223 /* set real mode segment descriptor */
1224 set_desc_base(&seg_desc, selector << 4);
1225 set_desc_limit(&seg_desc, 0xffff);
1232 /* NULL selector is not valid for TR, CS and SS */
1233 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
1237 /* TR should be in GDT only */
1238 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1241 if (null_selector) /* for NULL selector skip all following checks */
1244 ret = read_segment_descriptor(ctxt, selector, &seg_desc);
1245 if (ret != X86EMUL_CONTINUE)
1248 err_code = selector & 0xfffc;
1249 err_vec = GP_VECTOR;
1251 /* can't load system descriptor into segment selector */
1252 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1256 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1262 cpl = ctxt->ops->cpl(ctxt);
1267 * segment is not a writable data segment, or the segment
1268 * selector's RPL != CPL, or the segment's DPL != CPL
1270 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1274 if (!(seg_desc.type & 8))
1277 if (seg_desc.type & 4) {
1283 if (rpl > cpl || dpl != cpl)
1286 /* CS(RPL) <- CPL */
1287 selector = (selector & 0xfffc) | cpl;
1290 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1293 case VCPU_SREG_LDTR:
1294 if (seg_desc.s || seg_desc.type != 2)
1297 default: /* DS, ES, FS, or GS */
1299 * segment is not a data or readable code segment or
1300 * ((segment is a data or nonconforming code segment)
1301 * and (both RPL and CPL > DPL))
1303 if ((seg_desc.type & 0xa) == 0x8 ||
1304 (((seg_desc.type & 0xc) != 0xc) &&
1305 (rpl > dpl && cpl > dpl)))
1311 /* mark segment as accessed */
1313 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1314 if (ret != X86EMUL_CONTINUE)
1318 ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
1319 return X86EMUL_CONTINUE;
1321 emulate_exception(ctxt, err_vec, err_code, true);
1322 return X86EMUL_PROPAGATE_FAULT;
1325 static void write_register_operand(struct operand *op)
1327 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1328 switch (op->bytes) {
1330 *(u8 *)op->addr.reg = (u8)op->val;
1333 *(u16 *)op->addr.reg = (u16)op->val;
1336 *op->addr.reg = (u32)op->val;
1337 break; /* 64b: zero-extend */
1339 *op->addr.reg = op->val;
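/*
 * Example of the 4-byte case above: emulating "mov $-1, %eax" in 64-bit
 * mode writes (u32)0xffffffff through the full-width *op->addr.reg,
 * leaving RAX == 0x00000000ffffffff - the architectural zero-extension
 * of 32-bit register destinations.
 */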
1344 static int writeback(struct x86_emulate_ctxt *ctxt)
1348 switch (ctxt->dst.type) {
1350 write_register_operand(&ctxt->dst);
1353 if (ctxt->lock_prefix)
1354 rc = segmented_cmpxchg(ctxt,
1356 &ctxt->dst.orig_val,
1360 rc = segmented_write(ctxt,
1364 if (rc != X86EMUL_CONTINUE)
1368 write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
1376 return X86EMUL_CONTINUE;
1379 static int em_push(struct x86_emulate_ctxt *ctxt)
1381 struct segmented_address addr;
1383 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
1384 addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1385 addr.seg = VCPU_SREG_SS;
1387 /* Disable writeback. */
1388 ctxt->dst.type = OP_NONE;
1389 return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
1392 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1393 void *dest, int len)
1396 struct segmented_address addr;
1398 addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1399 addr.seg = VCPU_SREG_SS;
1400 rc = segmented_read(ctxt, addr, dest, len);
1401 if (rc != X86EMUL_CONTINUE)
1404 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
1408 static int em_pop(struct x86_emulate_ctxt *ctxt)
1410 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1413 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1414 void *dest, int len)
1417 unsigned long val, change_mask;
1418 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1419 int cpl = ctxt->ops->cpl(ctxt);
1421 rc = emulate_pop(ctxt, &val, len);
1422 if (rc != X86EMUL_CONTINUE)
1425 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1426 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1428 switch(ctxt->mode) {
1429 case X86EMUL_MODE_PROT64:
1430 case X86EMUL_MODE_PROT32:
1431 case X86EMUL_MODE_PROT16:
1433 change_mask |= EFLG_IOPL;
1435 change_mask |= EFLG_IF;
1437 case X86EMUL_MODE_VM86:
1439 return emulate_gp(ctxt, 0);
1440 change_mask |= EFLG_IF;
1442 default: /* real mode */
1443 change_mask |= (EFLG_IOPL | EFLG_IF);
1447 *(unsigned long *)dest =
1448 (ctxt->eflags & ~change_mask) | (val & change_mask);
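/*
 * Example: a POPF executed at CPL 3 with IOPL 0 takes the protected-mode
 * branch above without widening change_mask, so the popped IF and IOPL
 * bits are silently discarded rather than faulting - matching hardware
 * behaviour.
 */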
1453 static int em_popf(struct x86_emulate_ctxt *ctxt)
1455 ctxt->dst.type = OP_REG;
1456 ctxt->dst.addr.reg = &ctxt->eflags;
1457 ctxt->dst.bytes = ctxt->op_bytes;
1458 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1461 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1463 int seg = ctxt->src2.val;
1465 ctxt->src.val = get_segment_selector(ctxt, seg);
1467 return em_push(ctxt);
1470 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1472 int seg = ctxt->src2.val;
1473 unsigned long selector;
1476 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1477 if (rc != X86EMUL_CONTINUE)
1480 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1484 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1486 unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
1487 int rc = X86EMUL_CONTINUE;
1488 int reg = VCPU_REGS_RAX;
1490 while (reg <= VCPU_REGS_RDI) {
1491 ctxt->src.val = (reg == VCPU_REGS_RSP) ?
1492 old_esp : ctxt->regs[reg];
1495 if (rc != X86EMUL_CONTINUE)
1504 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1506 ctxt->src.val = (unsigned long)ctxt->eflags;
1507 return em_push(ctxt);
1510 static int em_popa(struct x86_emulate_ctxt *ctxt)
1512 int rc = X86EMUL_CONTINUE;
1513 int reg = VCPU_REGS_RDI;
1515 while (reg >= VCPU_REGS_RAX) {
1516 if (reg == VCPU_REGS_RSP) {
1517 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
1522 rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
1523 if (rc != X86EMUL_CONTINUE)
1530 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1532 struct x86_emulate_ops *ops = ctxt->ops;
1539 /* TODO: Add limit checks */
1540 ctxt->src.val = ctxt->eflags;
1542 if (rc != X86EMUL_CONTINUE)
1545 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1547 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1549 if (rc != X86EMUL_CONTINUE)
1552 ctxt->src.val = ctxt->_eip;
1554 if (rc != X86EMUL_CONTINUE)
1557 ops->get_idt(ctxt, &dt);
1559 eip_addr = dt.address + (irq << 2);
1560 cs_addr = dt.address + (irq << 2) + 2;
1562 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1563 if (rc != X86EMUL_CONTINUE)
1566 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1567 if (rc != X86EMUL_CONTINUE)
1570 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1571 if (rc != X86EMUL_CONTINUE)
1579 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1581 switch(ctxt->mode) {
1582 case X86EMUL_MODE_REAL:
1583 return emulate_int_real(ctxt, irq);
1584 case X86EMUL_MODE_VM86:
1585 case X86EMUL_MODE_PROT16:
1586 case X86EMUL_MODE_PROT32:
1587 case X86EMUL_MODE_PROT64:
1589 /* Protected-mode interrupts are not implemented yet */
1590 return X86EMUL_UNHANDLEABLE;
1594 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1596 int rc = X86EMUL_CONTINUE;
1597 unsigned long temp_eip = 0;
1598 unsigned long temp_eflags = 0;
1599 unsigned long cs = 0;
1600 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1601 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1602 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1603 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1605 /* TODO: Add stack limit check */
1607 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1609 if (rc != X86EMUL_CONTINUE)
1612 if (temp_eip & ~0xffff)
1613 return emulate_gp(ctxt, 0);
1615 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1617 if (rc != X86EMUL_CONTINUE)
1620 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1622 if (rc != X86EMUL_CONTINUE)
1625 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1627 if (rc != X86EMUL_CONTINUE)
1630 ctxt->_eip = temp_eip;
1633 if (ctxt->op_bytes == 4)
1634 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1635 else if (ctxt->op_bytes == 2) {
1636 ctxt->eflags &= ~0xffff;
1637 ctxt->eflags |= temp_eflags;
1640 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1641 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1646 static int em_iret(struct x86_emulate_ctxt *ctxt)
1648 switch(ctxt->mode) {
1649 case X86EMUL_MODE_REAL:
1650 return emulate_iret_real(ctxt);
1651 case X86EMUL_MODE_VM86:
1652 case X86EMUL_MODE_PROT16:
1653 case X86EMUL_MODE_PROT32:
1654 case X86EMUL_MODE_PROT64:
1656 /* iret from protected mode is not implemented yet */
1657 return X86EMUL_UNHANDLEABLE;
1661 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1666 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1668 rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
1669 if (rc != X86EMUL_CONTINUE)
1673 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
1674 return X86EMUL_CONTINUE;
1677 static int em_grp1a(struct x86_emulate_ctxt *ctxt)
1679 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
1682 static int em_grp2(struct x86_emulate_ctxt *ctxt)
1684 switch (ctxt->modrm_reg) {
1686 emulate_2op_SrcB(ctxt, "rol");
1689 emulate_2op_SrcB(ctxt, "ror");
1692 emulate_2op_SrcB(ctxt, "rcl");
1695 emulate_2op_SrcB(ctxt, "rcr");
1697 case 4: /* sal/shl */
1698 case 6: /* sal/shl */
1699 emulate_2op_SrcB(ctxt, "sal");
1702 emulate_2op_SrcB(ctxt, "shr");
1705 emulate_2op_SrcB(ctxt, "sar");
1708 return X86EMUL_CONTINUE;
1711 static int em_not(struct x86_emulate_ctxt *ctxt)
1713 ctxt->dst.val = ~ctxt->dst.val;
1714 return X86EMUL_CONTINUE;
1717 static int em_neg(struct x86_emulate_ctxt *ctxt)
1719 emulate_1op(ctxt, "neg");
1720 return X86EMUL_CONTINUE;
1723 static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
1727 emulate_1op_rax_rdx(ctxt, "mul", ex);
1728 return X86EMUL_CONTINUE;
1731 static int em_imul_ex(struct x86_emulate_ctxt *ctxt)
1735 emulate_1op_rax_rdx(ctxt, "imul", ex);
1736 return X86EMUL_CONTINUE;
1739 static int em_div_ex(struct x86_emulate_ctxt *ctxt)
1743 emulate_1op_rax_rdx(ctxt, "div", de);
1745 return emulate_de(ctxt);
1746 return X86EMUL_CONTINUE;
1749 static int em_idiv_ex(struct x86_emulate_ctxt *ctxt)
1753 emulate_1op_rax_rdx(ctxt, "idiv", de);
1755 return emulate_de(ctxt);
1756 return X86EMUL_CONTINUE;
1759 static int em_grp45(struct x86_emulate_ctxt *ctxt)
1761 int rc = X86EMUL_CONTINUE;
1763 switch (ctxt->modrm_reg) {
1765 emulate_1op(ctxt, "inc");
1768 emulate_1op(ctxt, "dec");
1770 case 2: /* call near abs */ {
1772 old_eip = ctxt->_eip;
1773 ctxt->_eip = ctxt->src.val;
1774 ctxt->src.val = old_eip;
1778 case 4: /* jmp abs */
1779 ctxt->_eip = ctxt->src.val;
1781 case 5: /* jmp far */
1782 rc = em_jmp_far(ctxt);
1791 static int em_grp9(struct x86_emulate_ctxt *ctxt)
1793 u64 old = ctxt->dst.orig_val64;
1795 if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
1796 ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
1797 ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1798 ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1799 ctxt->eflags &= ~EFLG_ZF;
1801 ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
1802 (u32) ctxt->regs[VCPU_REGS_RBX];
1804 ctxt->eflags |= EFLG_ZF;
1806 return X86EMUL_CONTINUE;
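/*
 * em_grp9() above implements cmpxchg8b: e.g. if the 64-bit memory
 * operand holds 0x1111111122222222 and EDX:EAX == 0x11111111:0x22222222,
 * the compare succeeds, ECX:EBX is written back and ZF is set; on a
 * mismatch EDX:EAX receive the old value and ZF is cleared instead.
 */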
1809 static int em_ret(struct x86_emulate_ctxt *ctxt)
1811 ctxt->dst.type = OP_REG;
1812 ctxt->dst.addr.reg = &ctxt->_eip;
1813 ctxt->dst.bytes = ctxt->op_bytes;
1814 return em_pop(ctxt);
1817 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
1821 int cpl = ctxt->ops->cpl(ctxt);
1823 rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
1824 if (rc != X86EMUL_CONTINUE)
1826 if (ctxt->op_bytes == 4)
1827 ctxt->_eip = (u32)ctxt->_eip;
1828 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1829 if (rc != X86EMUL_CONTINUE)
1831 /* Outer-privilege level return is not implemented */
1832 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
1833 return X86EMUL_UNHANDLEABLE;
1834 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1838 static int em_lseg(struct x86_emulate_ctxt *ctxt)
1840 int seg = ctxt->src2.val;
1844 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1846 rc = load_segment_descriptor(ctxt, sel, seg);
1847 if (rc != X86EMUL_CONTINUE)
1850 ctxt->dst.val = ctxt->src.val;
1855 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1856 struct desc_struct *cs, struct desc_struct *ss)
1860 memset(cs, 0, sizeof(struct desc_struct));
1861 ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
1862 memset(ss, 0, sizeof(struct desc_struct));
1864 cs->l = 0; /* will be adjusted later */
1865 set_desc_base(cs, 0); /* flat segment */
1866 cs->g = 1; /* 4kb granularity */
1867 set_desc_limit(cs, 0xfffff); /* 4GB limit */
1868 cs->type = 0x0b; /* Read, Execute, Accessed */
1870 cs->dpl = 0; /* will be adjusted later */
1874 set_desc_base(ss, 0); /* flat segment */
1875 set_desc_limit(ss, 0xfffff); /* 4GB limit */
1876 ss->g = 1; /* 4kb granularity */
1878 ss->type = 0x03; /* Read/Write, Accessed */
1879 ss->d = 1; /* 32bit stack segment */
1884 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
1886 struct x86_emulate_ops *ops = ctxt->ops;
1887 u32 eax, ebx, ecx, edx;
1890 * syscall should always be enabled in long mode, so it only becomes
1891 * vendor-specific (via cpuid) when other modes are active...
1893 if (ctxt->mode == X86EMUL_MODE_PROT64)
1898 if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
1900 * Intel ("GenuineIntel")
1901 * remark: Intel CPUs only support "syscall" in 64-bit
1902 * long mode. A 64-bit guest with a
1903 * 32-bit compat app running will therefore #UD! While this
1904 * behaviour could be fixed by emulating the AMD
1905 * response, AMD CPUs can't behave like Intel ones.
1907 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
1908 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
1909 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
1912 /* AMD ("AuthenticAMD") */
1913 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
1914 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
1915 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
1918 /* AMD ("AMDisbetter!") */
1919 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
1920 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
1921 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
1925 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
1929 static int em_syscall(struct x86_emulate_ctxt *ctxt)
1931 struct x86_emulate_ops *ops = ctxt->ops;
1932 struct desc_struct cs, ss;
1937 /* syscall is not available in real mode */
1938 if (ctxt->mode == X86EMUL_MODE_REAL ||
1939 ctxt->mode == X86EMUL_MODE_VM86)
1940 return emulate_ud(ctxt);
1942 if (!(em_syscall_is_enabled(ctxt)))
1943 return emulate_ud(ctxt);
1945 ops->get_msr(ctxt, MSR_EFER, &efer);
1946 setup_syscalls_segments(ctxt, &cs, &ss);
1948 if (!(efer & EFER_SCE))
1949 return emulate_ud(ctxt);
1951 ops->get_msr(ctxt, MSR_STAR, &msr_data);
1953 cs_sel = (u16)(msr_data & 0xfffc);
1954 ss_sel = (u16)(msr_data + 8);
1956 if (efer & EFER_LMA) {
1960 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
1961 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
1963 ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
1964 if (efer & EFER_LMA) {
1965 #ifdef CONFIG_X86_64
1966 ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
1969 ctxt->mode == X86EMUL_MODE_PROT64 ?
1970 MSR_LSTAR : MSR_CSTAR, &msr_data);
1971 ctxt->_eip = msr_data;
1973 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
1974 ctxt->eflags &= ~(msr_data | EFLG_RF);
1978 ops->get_msr(ctxt, MSR_STAR, &msr_data);
1979 ctxt->_eip = (u32)msr_data;
1981 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1984 return X86EMUL_CONTINUE;
1987 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
1989 struct x86_emulate_ops *ops = ctxt->ops;
1990 struct desc_struct cs, ss;
1995 ops->get_msr(ctxt, MSR_EFER, &efer);
1996 /* inject #GP if in real mode */
1997 if (ctxt->mode == X86EMUL_MODE_REAL)
1998 return emulate_gp(ctxt, 0);
2000 /* XXX sysenter/sysexit have not been tested in 64-bit mode.
2001 * Therefore, we inject a #UD.
2003 if (ctxt->mode == X86EMUL_MODE_PROT64)
2004 return emulate_ud(ctxt);
2006 setup_syscalls_segments(ctxt, &cs, &ss);
2008 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2009 switch (ctxt->mode) {
2010 case X86EMUL_MODE_PROT32:
2011 if ((msr_data & 0xfffc) == 0x0)
2012 return emulate_gp(ctxt, 0);
2014 case X86EMUL_MODE_PROT64:
2015 if (msr_data == 0x0)
2016 return emulate_gp(ctxt, 0);
2020 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2021 cs_sel = (u16)msr_data;
2022 cs_sel &= ~SELECTOR_RPL_MASK;
2023 ss_sel = cs_sel + 8;
2024 ss_sel &= ~SELECTOR_RPL_MASK;
2025 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2030 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2031 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2033 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2034 ctxt->_eip = msr_data;
2036 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2037 ctxt->regs[VCPU_REGS_RSP] = msr_data;
2039 return X86EMUL_CONTINUE;
2042 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2044 struct x86_emulate_ops *ops = ctxt->ops;
2045 struct desc_struct cs, ss;
2048 u16 cs_sel = 0, ss_sel = 0;
2050 /* inject #GP if in real mode or Virtual 8086 mode */
2051 if (ctxt->mode == X86EMUL_MODE_REAL ||
2052 ctxt->mode == X86EMUL_MODE_VM86)
2053 return emulate_gp(ctxt, 0);
2055 setup_syscalls_segments(ctxt, &cs, &ss);
2057 if ((ctxt->rex_prefix & 0x8) != 0x0)
2058 usermode = X86EMUL_MODE_PROT64;
2060 usermode = X86EMUL_MODE_PROT32;
2064 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2066 case X86EMUL_MODE_PROT32:
2067 cs_sel = (u16)(msr_data + 16);
2068 if ((msr_data & 0xfffc) == 0x0)
2069 return emulate_gp(ctxt, 0);
2070 ss_sel = (u16)(msr_data + 24);
2072 case X86EMUL_MODE_PROT64:
2073 cs_sel = (u16)(msr_data + 32);
2074 if (msr_data == 0x0)
2075 return emulate_gp(ctxt, 0);
2076 ss_sel = cs_sel + 8;
2081 cs_sel |= SELECTOR_RPL_MASK;
2082 ss_sel |= SELECTOR_RPL_MASK;
2084 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2085 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2087 ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
2088 ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
2090 return X86EMUL_CONTINUE;
2093 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2096 if (ctxt->mode == X86EMUL_MODE_REAL)
2098 if (ctxt->mode == X86EMUL_MODE_VM86)
2100 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2101 return ctxt->ops->cpl(ctxt) > iopl;
2104 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2107 struct x86_emulate_ops *ops = ctxt->ops;
2108 struct desc_struct tr_seg;
2111 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2112 unsigned mask = (1 << len) - 1;
2115 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2118 if (desc_limit_scaled(&tr_seg) < 103)
2120 base = get_desc_base(&tr_seg);
2121 #ifdef CONFIG_X86_64
2122 base |= ((u64)base3) << 32;
2124 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2125 if (r != X86EMUL_CONTINUE)
2127 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2129 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2130 if (r != X86EMUL_CONTINUE)
2132 if ((perm >> bit_idx) & mask)
2137 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2143 if (emulator_bad_iopl(ctxt))
2144 if (!emulator_io_port_access_allowed(ctxt, port, len))
2147 ctxt->perm_ok = true;
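/*
 * Worked example of the bitmap probe above: an outb to port 0x3f8 reads
 * the two permission bytes at base + io_bitmap_ptr + 0x3f8 / 8 (byte
 * offset 127); bit_idx == 0x3f8 & 7 == 0 and mask == 1, so the one-byte
 * access is allowed only if bit 0 of that word is clear.
 */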
2152 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2153 struct tss_segment_16 *tss)
2155 tss->ip = ctxt->_eip;
2156 tss->flag = ctxt->eflags;
2157 tss->ax = ctxt->regs[VCPU_REGS_RAX];
2158 tss->cx = ctxt->regs[VCPU_REGS_RCX];
2159 tss->dx = ctxt->regs[VCPU_REGS_RDX];
2160 tss->bx = ctxt->regs[VCPU_REGS_RBX];
2161 tss->sp = ctxt->regs[VCPU_REGS_RSP];
2162 tss->bp = ctxt->regs[VCPU_REGS_RBP];
2163 tss->si = ctxt->regs[VCPU_REGS_RSI];
2164 tss->di = ctxt->regs[VCPU_REGS_RDI];
2166 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2167 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2168 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2169 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2170 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2173 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2174 struct tss_segment_16 *tss)
2178 ctxt->_eip = tss->ip;
2179 ctxt->eflags = tss->flag | 2;
2180 ctxt->regs[VCPU_REGS_RAX] = tss->ax;
2181 ctxt->regs[VCPU_REGS_RCX] = tss->cx;
2182 ctxt->regs[VCPU_REGS_RDX] = tss->dx;
2183 ctxt->regs[VCPU_REGS_RBX] = tss->bx;
2184 ctxt->regs[VCPU_REGS_RSP] = tss->sp;
2185 ctxt->regs[VCPU_REGS_RBP] = tss->bp;
2186 ctxt->regs[VCPU_REGS_RSI] = tss->si;
2187 ctxt->regs[VCPU_REGS_RDI] = tss->di;
2190 * SDM says that segment selectors are loaded before segment descriptors
2193 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2194 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2195 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2196 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2197 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2200 * Now load segment descriptors. If a fault happens at this stage,
2201 * it is handled in the context of the new task
2203 ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
2204 if (ret != X86EMUL_CONTINUE)
2206 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2207 if (ret != X86EMUL_CONTINUE)
2209 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2210 if (ret != X86EMUL_CONTINUE)
2212 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2213 if (ret != X86EMUL_CONTINUE)
2215 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2216 if (ret != X86EMUL_CONTINUE)
2219 return X86EMUL_CONTINUE;
2222 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2223 u16 tss_selector, u16 old_tss_sel,
2224 ulong old_tss_base, struct desc_struct *new_desc)
2226 struct x86_emulate_ops *ops = ctxt->ops;
2227 struct tss_segment_16 tss_seg;
2229 u32 new_tss_base = get_desc_base(new_desc);
2231 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2233 if (ret != X86EMUL_CONTINUE)
2234 /* FIXME: need to provide precise fault address */
2237 save_state_to_tss16(ctxt, &tss_seg);
2239 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2241 if (ret != X86EMUL_CONTINUE)
2242 /* FIXME: need to provide precise fault address */
2245 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2247 if (ret != X86EMUL_CONTINUE)
2248 /* FIXME: need to provide precise fault address */
2251 if (old_tss_sel != 0xffff) {
2252 tss_seg.prev_task_link = old_tss_sel;
2254 ret = ops->write_std(ctxt, new_tss_base,
2255 &tss_seg.prev_task_link,
2256 sizeof tss_seg.prev_task_link,
2258 if (ret != X86EMUL_CONTINUE)
2259 /* FIXME: need to provide precise fault address */
2263 return load_state_from_tss16(ctxt, &tss_seg);
2266 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2267 struct tss_segment_32 *tss)
2269 tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
2270 tss->eip = ctxt->_eip;
2271 tss->eflags = ctxt->eflags;
2272 tss->eax = ctxt->regs[VCPU_REGS_RAX];
2273 tss->ecx = ctxt->regs[VCPU_REGS_RCX];
2274 tss->edx = ctxt->regs[VCPU_REGS_RDX];
2275 tss->ebx = ctxt->regs[VCPU_REGS_RBX];
2276 tss->esp = ctxt->regs[VCPU_REGS_RSP];
2277 tss->ebp = ctxt->regs[VCPU_REGS_RBP];
2278 tss->esi = ctxt->regs[VCPU_REGS_RSI];
2279 tss->edi = ctxt->regs[VCPU_REGS_RDI];
2281 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2282 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2283 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2284 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2285 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2286 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2287 tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2290 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2291 struct tss_segment_32 *tss)
2295 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2296 return emulate_gp(ctxt, 0);
2297 ctxt->_eip = tss->eip;
2298 ctxt->eflags = tss->eflags | 2;
2299 ctxt->regs[VCPU_REGS_RAX] = tss->eax;
2300 ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
2301 ctxt->regs[VCPU_REGS_RDX] = tss->edx;
2302 ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
2303 ctxt->regs[VCPU_REGS_RSP] = tss->esp;
2304 ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
2305 ctxt->regs[VCPU_REGS_RSI] = tss->esi;
2306 ctxt->regs[VCPU_REGS_RDI] = tss->edi;
2309 * SDM says that segment selectors are loaded before segment descriptors
2312 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2313 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2314 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2315 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2316 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2317 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2318 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2321 * Now load segment descriptors. If a fault happens at this stage,
2322 * it is handled in the context of the new task
2324 ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2325 if (ret != X86EMUL_CONTINUE)
2327 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2328 if (ret != X86EMUL_CONTINUE)
2330 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2331 if (ret != X86EMUL_CONTINUE)
2333 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2334 if (ret != X86EMUL_CONTINUE)
2336 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2337 if (ret != X86EMUL_CONTINUE)
2339 ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
2340 if (ret != X86EMUL_CONTINUE)
2342 ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
2343 if (ret != X86EMUL_CONTINUE)
2346 return X86EMUL_CONTINUE;
2349 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2350 u16 tss_selector, u16 old_tss_sel,
2351 ulong old_tss_base, struct desc_struct *new_desc)
2353 struct x86_emulate_ops *ops = ctxt->ops;
2354 struct tss_segment_32 tss_seg;
2356 u32 new_tss_base = get_desc_base(new_desc);
2358 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2360 if (ret != X86EMUL_CONTINUE)
2361 /* FIXME: need to provide precise fault address */
2364 save_state_to_tss32(ctxt, &tss_seg);
2366 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2368 if (ret != X86EMUL_CONTINUE)
2369 /* FIXME: need to provide precise fault address */
2372 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2374 if (ret != X86EMUL_CONTINUE)
2375 /* FIXME: need to provide precise fault address */
2378 if (old_tss_sel != 0xffff) {
2379 tss_seg.prev_task_link = old_tss_sel;
2381 ret = ops->write_std(ctxt, new_tss_base,
2382 &tss_seg.prev_task_link,
2383 sizeof tss_seg.prev_task_link,
2385 if (ret != X86EMUL_CONTINUE)
2386 /* FIXME: need to provide precise fault address */
2390 return load_state_from_tss32(ctxt, &tss_seg);
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt) > next_tss_desc.dpl)
			return emulate_gp(ctxt, 0);
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Set the back link to the previous task only if EFLAGS.NT will
	 * be set, i.e. for CALL and gate-based switches.  Note that
	 * old_tss_sel is not used after this point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE)
		ctxt->eip = ctxt->_eip;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
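
/*
 * Advance the index register of a string instruction by one element
 * (backwards if EFLAGS.DF is set) and recompute the operand's
 * effective address from the updated register.
 */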
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
			    int reg, struct operand *op)
{
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
	op->addr.mem.seg = seg;
}
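
/*
 * DAS: decimal-adjust AL after subtraction.  If the low nibble is
 * above 9 (or AF is set), subtract 6 from AL and set AF; if AL was
 * above 0x99 (or CF was set), subtract 0x60 and set CF.  PF, ZF and
 * SF are then recomputed by OR-ing the result with zero.
 */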
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	emulate_2op_SrcV(ctxt, "or");
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	jmp_rel(ctxt, rel);
	return em_push(ctxt);
}
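
/*
 * Far call: the operand holds the new IP in its low bytes and the new
 * CS selector just after them.  The new CS is loaded first; only if
 * that succeeds are the old CS and IP pushed as the return address.
 */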
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	old_eip = ctxt->_eip;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = old_eip;
	return em_push(ctxt);
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	ctxt->dst.bytes = ctxt->op_bytes;
	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_add(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "add");
	return X86EMUL_CONTINUE;
}

static int em_or(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "or");
	return X86EMUL_CONTINUE;
}

static int em_adc(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "adc");
	return X86EMUL_CONTINUE;
}

static int em_sbb(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "sbb");
	return X86EMUL_CONTINUE;
}

static int em_and(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "and");
	return X86EMUL_CONTINUE;
}

static int em_sub(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "sub");
	return X86EMUL_CONTINUE;
}

static int em_xor(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "xor");
	return X86EMUL_CONTINUE;
}

static int em_cmp(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "cmp");
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_test(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "test");
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "imul");
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return em_imul(ctxt);
}

static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
	/*
	 * Replicate the sign bit of the source into every bit of
	 * DX/EDX/RDX: all ones if the sign bit is set, zero otherwise.
	 */
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
	ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src.val;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_movdqu(struct x86_emulate_ctxt *ctxt)
{
	memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
	return X86EMUL_CONTINUE;
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
		return X86EMUL_UNHANDLEABLE;

	rc = ctxt->ops->fix_hypercall(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_gdt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = ctxt->ops->fix_hypercall(ctxt);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return rc;
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}
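
/*
 * LOOP/LOOPE/LOOPNE (0xe0-0xe2): decrement *CX and branch while it is
 * non-zero.  For LOOPE/LOOPNE, the opcode XORed with 5 yields the Jcc
 * condition code (JE/JNE) that must additionally hold.
 */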
static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
	if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}
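
/*
 * Pre-execution permission checks for control-register accesses.
 * Only CR0, CR2-CR4 and CR8 exist; a reference to any other CR is
 * #UD, and an attempt to set reserved bits is #GP(0).
 */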
static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		/* Paging requires protected mode; no-write-through requires cache-disable. */
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		/* Enabling paging with EFER.LME set requires CR4.PAE. */
		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
			rsvd = CR3_PAE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
			rsvd = CR3_NONPAE_RESERVED_BITS;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		u64 cr4;

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		/* CR4.PAE must not be cleared while in long mode. */
		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}
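
/*
 * Debug-register access checks: DR4/DR5 alias DR6/DR7 only while
 * CR4.DE is clear (with CR4.DE set they #UD), and any debug-register
 * access while DR7.GD (general detect) is set raises #DB.
 */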
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt))
		return emulate_db(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}
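
/*
 * SVM instructions require EFER.SVME.  Those that take a physical
 * address in RAX (VMRUN, VMLOAD, VMSAVE) additionally reject
 * addresses with any of the upper 16 bits set.
 */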
static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = ctxt->regs[VCPU_REGS_RAX];

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = ctxt->regs[VCPU_REGS_RCX];

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    (rcx > 3))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}
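
/*
 * Shorthands for building the opcode tables below: D() declares a
 * decode-only entry, I() attaches an ->execute handler, the *I*
 * variants add an intercept and/or a ->check_perm hook, and
 * G/GD/EXT/GP redirect decoding through a group, group-dual,
 * ModRM-extension or mandatory-prefix table.  The 2bv and I6ALU
 * wrappers stamp out the byte/word-dword-qword (and, for I6ALU, the
 * six classic ALU addressing-form) variants of an entry.
 */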
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
		      .check_perm = (_p) }
#define N    D(0)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
	  .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)

#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
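
/*
 * Group tables: for these opcodes the ModRM reg field (and, for the
 * group7 rm tables, the rm field) selects one of eight entries.
 */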
static struct opcode group7_rm1[] = {
	DI(SrcNone | ModRM | Priv, monitor),
	DI(SrcNone | ModRM | Priv, mwait),
	N, N, N, N, N, N,
};

static struct opcode group7_rm3[] = {
	DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
	II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
	DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
};

static struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static struct opcode group1[] = {
	I(Lock, em_add),
	I(Lock, em_or),
	I(Lock, em_adc),
	I(Lock, em_sbb),
	I(Lock, em_and),
	I(Lock, em_sub),
	I(Lock, em_xor),
	I(0, em_cmp),
};

static struct opcode group1A[] = {
	D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
};

static struct opcode group3[] = {
	I(DstMem | SrcImm | ModRM, em_test),
	I(DstMem | SrcImm | ModRM, em_test),
	I(DstMem | SrcNone | ModRM | Lock, em_not),
	I(DstMem | SrcNone | ModRM | Lock, em_neg),
	I(SrcMem | ModRM, em_mul_ex),
	I(SrcMem | ModRM, em_imul_ex),
	I(SrcMem | ModRM, em_div_ex),
	I(SrcMem | ModRM, em_idiv_ex),
};

static struct opcode group4[] = {
	D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
	N, N, N, N, N, N,
};

static struct opcode group5[] = {
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	D(SrcMem | ModRM | Stack),
	I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
	D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
	D(SrcMem | ModRM | Stack), N,
};

static struct opcode group6[] = {
	DI(ModRM | Prot, sldt),
	DI(ModRM | Prot, str),
	DI(ModRM | Prot | Priv, lldt),
	DI(ModRM | Prot | Priv, ltr),