diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 284c34c..25bd1bc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -51,6 +51,10 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_LBRV           (1 <<  1)
 #define SVM_FEATURE_SVML           (1 <<  2)
 #define SVM_FEATURE_NRIP           (1 <<  3)
+#define SVM_FEATURE_TSC_RATE       (1 <<  4)
+#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
+#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
+#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
 #define SVM_FEATURE_PAUSE_FILTER   (1 << 10)
 
 #define NESTED_EXIT_HOST       0       /* Exit handled on host level */
@@ -189,10 +193,20 @@ enum {
        VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
                            pause filter count */
        VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
+       VMCB_ASID,       /* ASID */
+       VMCB_INTR,       /* int_ctl, int_vector */
+       VMCB_NPT,        /* npt_en, nCR3, gPAT */
+       VMCB_CR,         /* CR0, CR3, CR4, EFER */
+       VMCB_DR,         /* DR6, DR7 */
+       VMCB_DT,         /* GDT, IDT */
+       VMCB_SEG,        /* CS, DS, SS, ES, CPL */
+       VMCB_CR2,        /* CR2 only */
+       VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
        VMCB_DIRTY_MAX,
 };
 
-#define VMCB_ALWAYS_DIRTY_MASK 0U
+/* TPR and CR2 are always written before VMRUN */
+#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))
 
 static inline void mark_all_dirty(struct vmcb *vmcb)
 {
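The enum above assigns each VMCB area its own clean bit, and VMCB_ALWAYS_DIRTY_MASK keeps the INTR and CR2 areas permanently dirty because KVM rewrites the TPR and CR2 before every VMRUN. The helper bodies are cut off in this view; a minimal sketch of what the mark_* helpers used throughout this patch do, assuming a "clean" bit-field in struct vmcb_control_area:

    /* Sketch only: assumes a "clean" field in struct vmcb_control_area. */
    static inline void mark_all_dirty(struct vmcb *vmcb)
    {
            vmcb->control.clean = 0;        /* hardware reloads every area */
    }

    static inline void mark_all_clean(struct vmcb *vmcb)
    {
            vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
                                   & ~VMCB_ALWAYS_DIRTY_MASK;
    }

    static inline void mark_dirty(struct vmcb *vmcb, int bit)
    {
            vmcb->control.clean &= ~(1 << bit);
    }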
@@ -411,16 +425,6 @@ static inline void invlpga(unsigned long addr, u32 asid)
        asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
 }
 
-static inline void force_new_asid(struct kvm_vcpu *vcpu)
-{
-       to_svm(vcpu)->asid_generation--;
-}
-
-static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
-{
-       force_new_asid(vcpu);
-}
-
 static int get_npt_level(void)
 {
 #ifdef CONFIG_X86_64
@@ -437,6 +441,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
                efer &= ~EFER_LME;
 
        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
+       mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
 }
 
 static int is_external_interrupt(u32 info)
@@ -474,7 +479,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
                svm->next_rip = svm->vmcb->control.next_rip;
 
        if (!svm->next_rip) {
-               if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
+               if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
                                EMULATE_DONE)
                        printk(KERN_DEBUG "%s: NOP\n", __func__);
                return;
@@ -934,6 +939,7 @@ static void init_vmcb(struct vcpu_svm *svm)
        set_intercept(svm, INTERCEPT_WBINVD);
        set_intercept(svm, INTERCEPT_MONITOR);
        set_intercept(svm, INTERCEPT_MWAIT);
+       set_intercept(svm, INTERCEPT_XSETBV);
 
        control->iopm_base_pa = iopm_base;
        control->msrpm_base_pa = __pa(svm->msrpm);
@@ -993,7 +999,7 @@ static void init_vmcb(struct vcpu_svm *svm)
                save->cr3 = 0;
                save->cr4 = 0;
        }
-       force_new_asid(&svm->vcpu);
+       svm->asid_generation = 0;
 
        svm->nested.vmcb = 0;
        svm->vcpu.arch.hflags = 0;
@@ -1168,7 +1174,7 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
        switch (reg) {
        case VCPU_EXREG_PDPTR:
                BUG_ON(!npt_enabled);
-               load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
+               load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
                break;
        default:
                BUG();
@@ -1297,6 +1303,7 @@ static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 
        svm->vmcb->save.idtr.limit = dt->size;
        svm->vmcb->save.idtr.base = dt->address ;
+       mark_dirty(svm->vmcb, VMCB_DT);
 }
 
 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
@@ -1313,12 +1320,17 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 
        svm->vmcb->save.gdtr.limit = dt->size;
        svm->vmcb->save.gdtr.base = dt->address ;
+       mark_dirty(svm->vmcb, VMCB_DT);
 }
 
 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
+static void svm_decache_cr3(struct kvm_vcpu *vcpu)
+{
+}
+
 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
@@ -1334,6 +1346,7 @@ static void update_cr0_intercept(struct vcpu_svm *svm)
                *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
                        | (gcr0 & SVM_CR0_SELECTIVE_MASK);
 
+       mark_dirty(svm->vmcb, VMCB_CR);
 
        if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
                clr_cr_intercept(svm, INTERCEPT_CR0_READ);
@@ -1400,6 +1413,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         */
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
+       mark_dirty(svm->vmcb, VMCB_CR);
        update_cr0_intercept(svm);
 }
 
@@ -1409,13 +1423,14 @@ static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
 
        if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
-               force_new_asid(vcpu);
+               svm_flush_tlb(vcpu);
 
        vcpu->arch.cr4 = cr4;
        if (!npt_enabled)
                cr4 |= X86_CR4_PAE;
        cr4 |= host_cr4_mce;
        to_svm(vcpu)->vmcb->save.cr4 = cr4;
+       mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
@@ -1444,6 +1459,7 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
                        = (svm->vmcb->save.cs.attrib
                           >> SVM_SELECTOR_DPL_SHIFT) & 3;
 
+       mark_dirty(svm->vmcb, VMCB_SEG);
 }
 
 static void update_db_intercept(struct kvm_vcpu *vcpu)
@@ -1475,6 +1491,8 @@ static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
        else
                svm->vmcb->save.dr7 = vcpu->arch.dr7;
 
+       mark_dirty(svm->vmcb, VMCB_DR);
+
        update_db_intercept(vcpu);
 }
 
@@ -1488,6 +1506,8 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 
        svm->asid_generation = sd->asid_generation;
        svm->vmcb->control.asid = sd->next_asid++;
+
+       mark_dirty(svm->vmcb, VMCB_ASID);
 }
 
 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
@@ -1495,6 +1515,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
        struct vcpu_svm *svm = to_svm(vcpu);
 
        svm->vmcb->save.dr7 = value;
+       mark_dirty(svm->vmcb, VMCB_DR);
 }
 
 static int pf_interception(struct vcpu_svm *svm)
@@ -1510,7 +1531,9 @@ static int pf_interception(struct vcpu_svm *svm)
                trace_kvm_page_fault(fault_address, error_code);
                if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
                        kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
-               r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
+               r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
+                       svm->vmcb->control.insn_bytes,
+                       svm->vmcb->control.insn_len);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                svm->apf_reason = 0;
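Decode Assists hardware also copies the bytes of the faulting instruction into the VMCB, which is what the two new arguments carry: the emulator can decode from this buffer instead of re-fetching (and possibly re-faulting on) guest memory. A sketch of the assumed fields, named after their usage in this hunk:

    /* Assumed Decode Assist fields in struct vmcb_control_area: */
    u8 insn_len;            /* valid bytes below; 0 if the CPU provided none */
    u8 insn_bytes[15];      /* raw bytes of the intercepted instruction */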
@@ -1573,7 +1596,7 @@ static int ud_interception(struct vcpu_svm *svm)
 {
        int er;
 
-       er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
+       er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
        if (er != EMULATE_DONE)
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        return 1;
@@ -1690,7 +1713,7 @@ static int io_interception(struct vcpu_svm *svm)
        string = (io_info & SVM_IOIO_STR_MASK) != 0;
        in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
        if (string || in)
-               return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
+               return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
@@ -1744,7 +1767,8 @@ static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
        struct vcpu_svm *svm = to_svm(vcpu);
 
        svm->vmcb->control.nested_cr3 = root;
-       force_new_asid(vcpu);
+       mark_dirty(svm->vmcb, VMCB_NPT);
+       svm_flush_tlb(vcpu);
 }
 
 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
@@ -2096,7 +2120,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
        nested_vmcb->save.idtr   = vmcb->save.idtr;
        nested_vmcb->save.efer   = svm->vcpu.arch.efer;
        nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
-       nested_vmcb->save.cr3    = svm->vcpu.arch.cr3;
+       nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
        nested_vmcb->save.cr2    = vmcb->save.cr2;
        nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
        nested_vmcb->save.rflags = vmcb->save.rflags;
@@ -2291,7 +2315,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
        if (npt_enabled)
                hsave->save.cr3    = vmcb->save.cr3;
        else
-               hsave->save.cr3    = svm->vcpu.arch.cr3;
+               hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
 
        copy_vmcb_control_area(hsave, vmcb);
 
@@ -2348,7 +2372,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
        svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
        svm->nested.intercept            = nested_vmcb->control.intercept;
 
-       force_new_asid(&svm->vcpu);
+       svm_flush_tlb(&svm->vcpu);
        svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
        if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
                svm->vcpu.arch.hflags |= HF_VINTR_MASK;
@@ -2505,6 +2529,8 @@ static int clgi_interception(struct vcpu_svm *svm)
        svm_clear_vintr(svm);
        svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 
+       mark_dirty(svm->vmcb, VMCB_INTR);
+
        return 1;
 }
 
@@ -2531,6 +2557,19 @@ static int skinit_interception(struct vcpu_svm *svm)
        return 1;
 }
 
+static int xsetbv_interception(struct vcpu_svm *svm)
+{
+       u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
+       u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+
+       if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
+               svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+               skip_emulated_instruction(&svm->vcpu);
+       }
+
+       return 1;
+}
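XSETBV writes EDX:EAX into the extended control register selected by ECX, and its opcode (0f 01 d1) is three bytes long; that explains both kvm_read_edx_eax() and the fixed next_rip + 3 above. A guest-side sketch (hypothetical helper, shown only to illustrate the register convention):

    static inline void xsetbv(u32 index, u64 value)
    {
            u32 eax = (u32)value;
            u32 edx = (u32)(value >> 32);

            /* 0f 01 d1 = xsetbv: 3 bytes, hence next_rip + 3 */
            asm volatile(".byte 0x0f, 0x01, 0xd1"
                         : : "a" (eax), "d" (edx), "c" (index));
    }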
+
 static int invalid_op_interception(struct vcpu_svm *svm)
 {
        kvm_queue_exception(&svm->vcpu, UD_VECTOR);
@@ -2619,12 +2658,85 @@ static int iret_interception(struct vcpu_svm *svm)
 
 static int invlpg_interception(struct vcpu_svm *svm)
 {
-       return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
+       if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+               return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+
+       kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
+       skip_emulated_instruction(&svm->vcpu);
+       return 1;
 }
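With Decode Assists available, EXITINFO1 already carries the linear address operand of INVLPG, so the handler invalidates the page directly and bypasses the instruction emulator entirely. For example:

    /* Guest executes "invlpg (%rax)" with RAX = 0xffff880000001000:
     *   exit_code   = SVM_EXIT_INVLPG
     *   exit_info_1 = 0xffff880000001000  (linear address, no decoding needed)
     */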
 
 static int emulate_on_interception(struct vcpu_svm *svm)
 {
-       return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
+       return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+}
+
+#define CR_VALID (1ULL << 63)
+
+static int cr_interception(struct vcpu_svm *svm)
+{
+       int reg, cr;
+       unsigned long val;
+       int err;
+
+       if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+               return emulate_on_interception(svm);
+
+       if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
+               return emulate_on_interception(svm);
+
+       reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+       cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
+
+       err = 0;
+       if (cr >= 16) { /* mov to cr */
+               cr -= 16;
+               val = kvm_register_read(&svm->vcpu, reg);
+               switch (cr) {
+               case 0:
+                       err = kvm_set_cr0(&svm->vcpu, val);
+                       break;
+               case 3:
+                       err = kvm_set_cr3(&svm->vcpu, val);
+                       break;
+               case 4:
+                       err = kvm_set_cr4(&svm->vcpu, val);
+                       break;
+               case 8:
+                       err = kvm_set_cr8(&svm->vcpu, val);
+                       break;
+               default:
+                       WARN(1, "unhandled write to CR%d", cr);
+                       kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+                       return 1;
+               }
+       } else { /* mov from cr */
+               switch (cr) {
+               case 0:
+                       val = kvm_read_cr0(&svm->vcpu);
+                       break;
+               case 2:
+                       val = svm->vcpu.arch.cr2;
+                       break;
+               case 3:
+                       val = kvm_read_cr3(&svm->vcpu);
+                       break;
+               case 4:
+                       val = kvm_read_cr4(&svm->vcpu);
+                       break;
+               case 8:
+                       val = kvm_get_cr8(&svm->vcpu);
+                       break;
+               default:
+                       WARN(1, "unhandled read from CR%d", cr);
+                       kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+                       return 1;
+               }
+               kvm_register_write(&svm->vcpu, reg, val);
+       }
+       kvm_complete_insn_gp(&svm->vcpu, err);
+
+       return 1;
 }
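cr_interception relies on two Decode Assist conventions: bit 63 of EXITINFO1 (CR_VALID) signals that the low bits name the GPR operand, and the SVM exit-code layout places each MOV-to-CR exit exactly 16 codes above the matching MOV-from-CR exit. A worked example of the arithmetic:

    /* Guest executes "mov %rbx, %cr4":
     *   exit_code   = SVM_EXIT_WRITE_CR4 = SVM_EXIT_READ_CR0 + 20
     *   cr          = 20  ->  >= 16, so a write; cr -= 16 selects CR4
     *   exit_info_1 = CR_VALID | 3             (3 = GPR number of RBX)
     *   val         = kvm_register_read(vcpu, 3) -> kvm_set_cr4(vcpu, val)
     * kvm_complete_insn_gp() then advances RIP or injects #GP based on err.
     */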
 
 static int cr0_write_interception(struct vcpu_svm *svm)
@@ -2632,7 +2744,7 @@ static int cr0_write_interception(struct vcpu_svm *svm)
        struct kvm_vcpu *vcpu = &svm->vcpu;
        int r;
 
-       r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+       r = cr_interception(svm);
 
        if (svm->nested.vmexit_rip) {
                kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
@@ -2641,22 +2753,49 @@ static int cr0_write_interception(struct vcpu_svm *svm)
                svm->nested.vmexit_rip = 0;
        }
 
-       return r == EMULATE_DONE;
+       return r;
+}
+
+static int dr_interception(struct vcpu_svm *svm)
+{
+       int reg, dr;
+       unsigned long val;
+       int err;
+
+       if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
+               return emulate_on_interception(svm);
+
+       reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+       dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
+
+       if (dr >= 16) { /* mov to DRn */
+               val = kvm_register_read(&svm->vcpu, reg);
+               kvm_set_dr(&svm->vcpu, dr - 16, val);
+       } else {
+               err = kvm_get_dr(&svm->vcpu, dr, &val);
+               if (!err)
+                       kvm_register_write(&svm->vcpu, reg, val);
+       }
+
+       skip_emulated_instruction(&svm->vcpu);
+
+       return 1;
 }
 
 static int cr8_write_interception(struct vcpu_svm *svm)
 {
        struct kvm_run *kvm_run = svm->vcpu.run;
+       int r;
 
        u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
        /* instruction emulation calls kvm_set_cr8() */
-       emulate_instruction(&svm->vcpu, 0, 0, 0);
+       r = cr_interception(svm);
        if (irqchip_in_kernel(svm->vcpu.kvm)) {
                clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
-               return 1;
+               return r;
        }
        if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
-               return 1;
+               return r;
        kvm_run->exit_reason = KVM_EXIT_SET_TPR;
        return 0;
 }
@@ -2823,6 +2960,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
                        return 1;
 
                svm->vmcb->save.dbgctl = data;
+               mark_dirty(svm->vmcb, VMCB_LBR);
                if (data & (1ULL<<0))
                        svm_enable_lbrv(svm);
                else
@@ -2875,6 +3013,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
        kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
        svm_clear_vintr(svm);
        svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
+       mark_dirty(svm->vmcb, VMCB_INTR);
        /*
         * If the user space waits to inject interrupts, exit as soon as
         * possible
@@ -2897,31 +3036,31 @@ static int pause_interception(struct vcpu_svm *svm)
 }
 
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
-       [SVM_EXIT_READ_CR0]                     = emulate_on_interception,
-       [SVM_EXIT_READ_CR3]                     = emulate_on_interception,
-       [SVM_EXIT_READ_CR4]                     = emulate_on_interception,
-       [SVM_EXIT_READ_CR8]                     = emulate_on_interception,
+       [SVM_EXIT_READ_CR0]                     = cr_interception,
+       [SVM_EXIT_READ_CR3]                     = cr_interception,
+       [SVM_EXIT_READ_CR4]                     = cr_interception,
+       [SVM_EXIT_READ_CR8]                     = cr_interception,
        [SVM_EXIT_CR0_SEL_WRITE]                = emulate_on_interception,
        [SVM_EXIT_WRITE_CR0]                    = cr0_write_interception,
-       [SVM_EXIT_WRITE_CR3]                    = emulate_on_interception,
-       [SVM_EXIT_WRITE_CR4]                    = emulate_on_interception,
+       [SVM_EXIT_WRITE_CR3]                    = cr_interception,
+       [SVM_EXIT_WRITE_CR4]                    = cr_interception,
        [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
-       [SVM_EXIT_READ_DR0]                     = emulate_on_interception,
-       [SVM_EXIT_READ_DR1]                     = emulate_on_interception,
-       [SVM_EXIT_READ_DR2]                     = emulate_on_interception,
-       [SVM_EXIT_READ_DR3]                     = emulate_on_interception,
-       [SVM_EXIT_READ_DR4]                     = emulate_on_interception,
-       [SVM_EXIT_READ_DR5]                     = emulate_on_interception,
-       [SVM_EXIT_READ_DR6]                     = emulate_on_interception,
-       [SVM_EXIT_READ_DR7]                     = emulate_on_interception,
-       [SVM_EXIT_WRITE_DR0]                    = emulate_on_interception,
-       [SVM_EXIT_WRITE_DR1]                    = emulate_on_interception,
-       [SVM_EXIT_WRITE_DR2]                    = emulate_on_interception,
-       [SVM_EXIT_WRITE_DR3]                    = emulate_on_interception,
-       [SVM_EXIT_WRITE_DR4]                    = emulate_on_interception,
-       [SVM_EXIT_WRITE_DR5]                    = emulate_on_interception,
-       [SVM_EXIT_WRITE_DR6]                    = emulate_on_interception,
-       [SVM_EXIT_WRITE_DR7]                    = emulate_on_interception,
+       [SVM_EXIT_READ_DR0]                     = dr_interception,
+       [SVM_EXIT_READ_DR1]                     = dr_interception,
+       [SVM_EXIT_READ_DR2]                     = dr_interception,
+       [SVM_EXIT_READ_DR3]                     = dr_interception,
+       [SVM_EXIT_READ_DR4]                     = dr_interception,
+       [SVM_EXIT_READ_DR5]                     = dr_interception,
+       [SVM_EXIT_READ_DR6]                     = dr_interception,
+       [SVM_EXIT_READ_DR7]                     = dr_interception,
+       [SVM_EXIT_WRITE_DR0]                    = dr_interception,
+       [SVM_EXIT_WRITE_DR1]                    = dr_interception,
+       [SVM_EXIT_WRITE_DR2]                    = dr_interception,
+       [SVM_EXIT_WRITE_DR3]                    = dr_interception,
+       [SVM_EXIT_WRITE_DR4]                    = dr_interception,
+       [SVM_EXIT_WRITE_DR5]                    = dr_interception,
+       [SVM_EXIT_WRITE_DR6]                    = dr_interception,
+       [SVM_EXIT_WRITE_DR7]                    = dr_interception,
        [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
        [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
        [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
@@ -2954,6 +3093,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_WBINVD]                       = emulate_on_interception,
        [SVM_EXIT_MONITOR]                      = invalid_op_interception,
        [SVM_EXIT_MWAIT]                        = invalid_op_interception,
+       [SVM_EXIT_XSETBV]                       = xsetbv_interception,
        [SVM_EXIT_NPF]                          = pf_interception,
 };
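For context, exit dispatch indexes this table directly by hardware exit code. A sketch of the relevant logic in handle_exit(), which sits outside this diff:

    /* Sketch, assuming the usual bounds check in handle_exit(): */
    u32 exit_code = svm->vmcb->control.exit_code;

    if (exit_code >= ARRAY_SIZE(svm_exit_handlers) ||
        !svm_exit_handlers[exit_code]) {
            kvm_run->exit_reason = KVM_EXIT_UNKNOWN;  /* punt to user space */
            kvm_run->hw.hardware_exit_reason = exit_code;
            return 0;
    }
    return svm_exit_handlers[exit_code](svm);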
 
@@ -3141,7 +3281,6 @@ static void pre_svm_run(struct vcpu_svm *svm)
 
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 
-       svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
        /* FIXME: handle wraparound of asid_generation */
        if (svm->asid_generation != sd->asid_generation)
                new_asid(svm, sd);
@@ -3166,6 +3305,7 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
        control->int_ctl &= ~V_INTR_PRIO_MASK;
        control->int_ctl |= V_IRQ_MASK |
                ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
+       mark_dirty(svm->vmcb, VMCB_INTR);
 }
 
 static void svm_set_irq(struct kvm_vcpu *vcpu)
@@ -3285,7 +3425,12 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
 
 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
 {
-       force_new_asid(vcpu);
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
+               svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
+       else
+               svm->asid_generation--;
 }
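On Flush-By-ASID hardware, setting tlb_ctl makes the next VMRUN flush only this guest's ASID; older hardware falls back to decrementing asid_generation, which forces pre_svm_run() to hand out a fresh ASID and thereby drops the stale translations. The tlb_ctl encodings, as defined in the SVM header:

    /* tlb_ctl encodings (arch/x86/include/asm/svm.h) */
    #define TLB_CONTROL_DO_NOTHING          0
    #define TLB_CONTROL_FLUSH_ALL_ASID      1
    #define TLB_CONTROL_FLUSH_ASID          3
    #define TLB_CONTROL_FLUSH_ASID_LOCAL    7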
 
 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
@@ -3511,6 +3656,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
        svm->next_rip = 0;
 
+       svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+
        /* if exit due to PF check for async PF */
        if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
                svm->apf_reason = kvm_read_and_reset_pf_reason();
@@ -3538,7 +3685,8 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
        struct vcpu_svm *svm = to_svm(vcpu);
 
        svm->vmcb->save.cr3 = root;
-       force_new_asid(vcpu);
+       mark_dirty(svm->vmcb, VMCB_CR);
+       svm_flush_tlb(vcpu);
 }
 
 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
@@ -3546,11 +3694,13 @@ static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
        struct vcpu_svm *svm = to_svm(vcpu);
 
        svm->vmcb->control.nested_cr3 = root;
+       mark_dirty(svm->vmcb, VMCB_NPT);
 
        /* Also sync guest cr3 here in case we live migrate */
-       svm->vmcb->save.cr3 = vcpu->arch.cr3;
+       svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
+       mark_dirty(svm->vmcb, VMCB_CR);
 
-       force_new_asid(vcpu);
+       svm_flush_tlb(vcpu);
 }
 
 static int is_disabled(void)
@@ -3597,10 +3747,6 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
        switch (func) {
-       case 0x00000001:
-               /* Mask out xsave bit as long as it is not supported by SVM */
-               entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
-               break;
        case 0x80000001:
                if (nested)
                        entry->ecx |= (1 << 2); /* Set SVM bit */
@@ -3674,6 +3820,7 @@ static const struct trace_print_flags svm_exit_reasons_str[] = {
        { SVM_EXIT_WBINVD,                      "wbinvd" },
        { SVM_EXIT_MONITOR,                     "monitor" },
        { SVM_EXIT_MWAIT,                       "mwait" },
+       { SVM_EXIT_XSETBV,                      "xsetbv" },
        { SVM_EXIT_NPF,                         "npf" },
        { -1, NULL }
 };
@@ -3728,6 +3875,7 @@ static struct kvm_x86_ops svm_x86_ops = {
        .get_cpl = svm_get_cpl,
        .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
        .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
+       .decache_cr3 = svm_decache_cr3,
        .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
        .set_cr0 = svm_set_cr0,
        .set_cr3 = svm_set_cr3,