KVM: x86: use new CS.RPL as CPL during task switch
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 1bedd52..aa71c95 100644
@@ -1233,11 +1233,11 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 }
 
 /* Does not support long mode */
-static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-                                  u16 selector, int seg)
+static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+                                    u16 selector, int seg, u8 cpl)
 {
        struct desc_struct seg_desc;
-       u8 dpl, rpl, cpl;
+       u8 dpl, rpl;
        unsigned err_vec = GP_VECTOR;
        u32 err_code = 0;
        bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
@@ -1286,7 +1286,6 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
        rpl = selector & 3;
        dpl = seg_desc.dpl;
-       cpl = ctxt->ops->cpl(ctxt);
 
        switch (seg) {
        case VCPU_SREG_SS:
@@ -1349,6 +1348,13 @@ exception:
        return X86EMUL_PROPAGATE_FAULT;
 }
 
+static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+                                  u16 selector, int seg)
+{
+       u8 cpl = ctxt->ops->cpl(ctxt);
+       return __load_segment_descriptor(ctxt, selector, seg, cpl);
+}
+
 static void write_register_operand(struct operand *op)
 {
        /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
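
The refactoring above matters because __load_segment_descriptor() validates the selector against the CPL it is handed. A condensed, self-contained sketch of those checks (simplified, with assumed helper names; the real code is a switch over the segment register in emulate.c, only partially visible in this hunk):

#include <stdbool.h>
#include <stdint.h>

/* SS must be a writable data segment with DPL == RPL == CPL. */
static bool ss_load_ok(uint8_t dpl, uint8_t rpl, uint8_t cpl)
{
	return dpl == cpl && rpl == cpl;
}

/*
 * CS: a conforming code segment may be entered when DPL <= CPL;
 * a non-conforming one requires RPL <= CPL and DPL == CPL.
 */
static bool cs_load_ok(bool conforming, uint8_t dpl, uint8_t rpl, uint8_t cpl)
{
	return conforming ? dpl <= cpl : (rpl <= cpl && dpl == cpl);
}

Because a hardware task switch establishes a new CPL before these checks run, feeding them the stale ctxt->ops->cpl() value can fault spuriously; the task-switch callers below therefore pass the incoming CS.RPL instead.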
@@ -2213,6 +2219,7 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
                                 struct tss_segment_16 *tss)
 {
        int ret;
+       u8 cpl;
 
        ctxt->_eip = tss->ip;
        ctxt->eflags = tss->flag | 2;
@@ -2235,23 +2242,25 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
        set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
        set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
 
+       cpl = tss->cs & 3;
+
        /*
         * Now load the segment descriptors. If a fault happens at this
         * stage, it is handled in the context of the new task.
         */
-       ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
+       ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
+       ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
+       ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
+       ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
+       ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
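To see why cpl is taken from the incoming tss->cs rather than from ctxt->ops->cpl(), consider a switch from a CPL-3 task into a ring-0 task. A minimal sketch; the concrete values are illustrative, not from the commit:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t new_cs = 0x0008;	/* incoming CS selector, RPL = 0 */
	uint8_t ss_dpl = 0, ss_rpl = 0;	/* incoming ring-0 stack segment */

	uint8_t stale_cpl = 3;		/* ctxt->ops->cpl() before the switch */
	uint8_t cpl = new_cs & 3;	/* 0: the CPL the new code uses */

	/* Old code: the SS check (DPL == RPL == CPL) fails -> spurious fault. */
	assert(!(ss_dpl == stale_cpl && ss_rpl == stale_cpl));
	/* New code: the check passes, matching real-CPU behaviour. */
	assert(ss_dpl == cpl && ss_rpl == cpl);
	return 0;
}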
@@ -2330,6 +2339,7 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
                                 struct tss_segment_32 *tss)
 {
        int ret;
+       u8 cpl;
 
        if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
                return emulate_gp(ctxt, 0);
@@ -2346,7 +2356,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 
        /*
         * SDM says that segment selectors are loaded before segment
-        * descriptors
+        * descriptors.  This is important because CPL checks will
+        * use CS.RPL.
         */
        set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
        set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
@@ -2356,29 +2367,31 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
        set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
        set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
 
+       cpl = tss->cs & 3;
+
        /*
         * Now load the segment descriptors. If a fault happens at this
         * stage, it is handled in the context of the new task.
         */
-       ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
+       ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
+       ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
+       ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
+       ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
+       ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
+       ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
+       ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
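
With this split, ordinary callers are unchanged: the load_segment_descriptor() wrapper still derives the CPL from current state, and only the task-switch paths supply the incoming CS.RPL. A sketch of the two call patterns (the first caller's surrounding context is assumed, not shown in this diff):

	/* Ordinary segment load, e.g. an emulated MOV to a segment
	 * register: CPL comes from current state via the wrapper. */
	rc = load_segment_descriptor(ctxt, selector, VCPU_SREG_DS);

	/* Hardware task switch: CPL is the new CS.RPL, per the SDM. */
	rc = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS,
				       tss->cs & 3);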