KVM: Rename vcpu->shadow_efer to efer
[pandora-kernel.git] arch/x86/kvm/x86.c
index 3b81cb9..27af6e3 100644
@@ -38,6 +38,7 @@
 #include <linux/intel-iommu.h>
 #include <linux/cpufreq.h>
 #include <linux/user-return-notifier.h>
+#include <linux/srcu.h>
 #include <trace/events/kvm.h>
 #undef TRACE_INCLUDE_FILE
 #define CREATE_TRACE_POINTS
@@ -427,12 +428,18 @@ out:
 
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
-       if (cr0 & CR0_RESERVED_BITS) {
+       cr0 |= X86_CR0_ET;
+
+#ifdef CONFIG_X86_64
+       if (cr0 & 0xffffffff00000000UL) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-                      cr0, vcpu->arch.cr0);
+                      cr0, kvm_read_cr0(vcpu));
                kvm_inject_gp(vcpu, 0);
                return;
        }
+#endif
+
+       cr0 &= ~CR0_RESERVED_BITS;
 
        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
@@ -449,7 +456,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 #ifdef CONFIG_X86_64
-               if ((vcpu->arch.shadow_efer & EFER_LME)) {
+               if ((vcpu->arch.efer & EFER_LME)) {
                        int cs_db, cs_l;
 
                        if (!is_pae(vcpu)) {
@@ -487,7 +494,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
 
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
-       kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+       kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
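
kvm_lmsw() now goes through kvm_read_cr0_bits() instead of reading vcpu->arch.cr0 directly, because some CR0 bits (TS, for lazy FPU switching) can be left guest-owned and must be fetched back from hardware on demand. A sketch of the accessor as it looked in kvm_cache_regs.h around this time; the guest-owned-bits plumbing is quoted from memory, so treat the details as an assumption:

	static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
	{
		ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

		if (tmask & vcpu->arch.cr0_guest_owned_bits)
			kvm_x86_ops->decache_cr0_guest_bits(vcpu);
		return vcpu->arch.cr0 & mask;
	}

	static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
	{
		return kvm_read_cr0_bits(vcpu, ~0UL);
	}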
 
@@ -619,9 +626,11 @@ static inline u32 bit(int bitno)
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN    2
+#define KVM_SAVE_MSRS_BEGIN    5
 static u32 msrs_to_save[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+       HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
+       HV_X64_MSR_APIC_ASSIST_PAGE,
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_K6_STAR,
 #ifdef CONFIG_X86_64
@@ -646,7 +655,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
        }
 
        if (is_paging(vcpu)
-           && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+           && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                kvm_inject_gp(vcpu, 0);
                return;
@@ -677,9 +686,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
        kvm_x86_ops->set_efer(vcpu, efer);
 
        efer &= ~EFER_LMA;
-       efer |= vcpu->arch.shadow_efer & EFER_LMA;
+       efer |= vcpu->arch.efer & EFER_LMA;
 
-       vcpu->arch.shadow_efer = efer;
+       vcpu->arch.efer = efer;
 
        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
@@ -1001,6 +1010,100 @@ out:
        return r;
 }
 
+static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
+{
+       return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
+}
+
+static bool kvm_hv_msr_partition_wide(u32 msr)
+{
+       bool r = false;
+       switch (msr) {
+       case HV_X64_MSR_GUEST_OS_ID:
+       case HV_X64_MSR_HYPERCALL:
+               r = true;
+               break;
+       }
+
+       return r;
+}
+
+static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+       struct kvm *kvm = vcpu->kvm;
+
+       switch (msr) {
+       case HV_X64_MSR_GUEST_OS_ID:
+               kvm->arch.hv_guest_os_id = data;
+               /* setting the guest OS id to zero disables the hypercall page */
+               if (!kvm->arch.hv_guest_os_id)
+                       kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
+               break;
+       case HV_X64_MSR_HYPERCALL: {
+               u64 gfn;
+               unsigned long addr;
+               u8 instructions[4];
+
+               /* if the guest OS id is not set, the hypercall page stays disabled */
+               if (!kvm->arch.hv_guest_os_id)
+                       break;
+               if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
+                       kvm->arch.hv_hypercall = data;
+                       break;
+               }
+               gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
+               addr = gfn_to_hva(kvm, gfn);
+               if (kvm_is_error_hva(addr))
+                       return 1;
+               kvm_x86_ops->patch_hypercall(vcpu, instructions);
+               ((unsigned char *)instructions)[3] = 0xc3; /* ret */
+               if (copy_to_user((void __user *)addr, instructions, 4))
+                       return 1;
+               kvm->arch.hv_hypercall = data;
+               break;
+       }
+       default:
+               pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
+                         "data 0x%llx\n", msr, data);
+               return 1;
+       }
+       return 0;
+}
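
kvm_x86_ops->patch_hypercall() emits the vendor's 3-byte hypercall instruction into instructions[0..2], and the byte stored at index 3 above appends a ret so the guest can CALL the page. Assuming the usual opcodes, the start of the hypercall page ends up as:

	static const u8 hv_hc_page_vmx[4] = { 0x0f, 0x01, 0xc1, 0xc3 }; /* vmcall; ret */
	static const u8 hv_hc_page_svm[4] = { 0x0f, 0x01, 0xd9, 0xc3 }; /* vmmcall; ret */
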
+
+static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+       switch (msr) {
+       case HV_X64_MSR_APIC_ASSIST_PAGE: {
+               unsigned long addr;
+
+               if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
+                       vcpu->arch.hv_vapic = data;
+                       break;
+               }
+               addr = gfn_to_hva(vcpu->kvm, data >>
+                                 HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
+               if (kvm_is_error_hva(addr))
+                       return 1;
+               if (clear_user((void __user *)addr, PAGE_SIZE))
+                       return 1;
+               vcpu->arch.hv_vapic = data;
+               break;
+       }
+       case HV_X64_MSR_EOI:
+               return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
+       case HV_X64_MSR_ICR:
+               return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
+       case HV_X64_MSR_TPR:
+               return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
+       default:
+               pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
+                         "data 0x%llx\n", msr, data);
+               return 1;
+       }
+
+       return 0;
+}
+
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
        switch (msr) {
@@ -1115,6 +1218,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
                        "0x%x data 0x%llx\n", msr, data);
                break;
+       case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+               if (kvm_hv_msr_partition_wide(msr)) {
+                       int r;
+                       mutex_lock(&vcpu->kvm->lock);
+                       r = set_msr_hyperv_pw(vcpu, msr, data);
+                       mutex_unlock(&vcpu->kvm->lock);
+                       return r;
+               } else
+                       return set_msr_hyperv(vcpu, msr, data);
+               break;
        default:
                if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
                        return xen_hvm_config(vcpu, data);
@@ -1214,6 +1327,54 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        return 0;
 }
 
+static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+       u64 data = 0;
+       struct kvm *kvm = vcpu->kvm;
+
+       switch (msr) {
+       case HV_X64_MSR_GUEST_OS_ID:
+               data = kvm->arch.hv_guest_os_id;
+               break;
+       case HV_X64_MSR_HYPERCALL:
+               data = kvm->arch.hv_hypercall;
+               break;
+       default:
+               pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+               return 1;
+       }
+
+       *pdata = data;
+       return 0;
+}
+
+static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+       u64 data = 0;
+
+       switch (msr) {
+       case HV_X64_MSR_VP_INDEX: {
+               int r;
+               struct kvm_vcpu *v;
+               kvm_for_each_vcpu(r, v, vcpu->kvm)
+                       if (v == vcpu)
+                               data = r;
+               break;
+       }
+       case HV_X64_MSR_EOI:
+               return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
+       case HV_X64_MSR_ICR:
+               return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
+       case HV_X64_MSR_TPR:
+               return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
+       default:
+               pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+               return 1;
+       }
+       *pdata = data;
+       return 0;
+}
+
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
        u64 data;
@@ -1265,7 +1426,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                data |= (((uint64_t)4ULL) << 40);
                break;
        case MSR_EFER:
-               data = vcpu->arch.shadow_efer;
+               data = vcpu->arch.efer;
                break;
        case MSR_KVM_WALL_CLOCK:
                data = vcpu->kvm->arch.wall_clock;
@@ -1280,6 +1441,16 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
                return get_msr_mce(vcpu, msr, pdata);
+       case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+               if (kvm_hv_msr_partition_wide(msr)) {
+                       int r;
+                       mutex_lock(&vcpu->kvm->lock);
+                       r = get_msr_hyperv_pw(vcpu, msr, pdata);
+                       mutex_unlock(&vcpu->kvm->lock);
+                       return r;
+               } else
+                       return get_msr_hyperv(vcpu, msr, pdata);
+               break;
        default:
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
@@ -1305,15 +1476,15 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
                    int (*do_msr)(struct kvm_vcpu *vcpu,
                                  unsigned index, u64 *data))
 {
-       int i;
+       int i, idx;
 
        vcpu_load(vcpu);
 
-       down_read(&vcpu->kvm->slots_lock);
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
        for (i = 0; i < msrs->nmsrs; ++i)
                if (do_msr(vcpu, entries[i].index, &entries[i].data))
                        break;
-       up_read(&vcpu->kvm->slots_lock);
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
        vcpu_put(vcpu);
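
This hunk shows the read-side half of the slots_lock-to-SRCU conversion that runs through the rest of the patch: readers of SRCU-protected pointers such as kvm->memslots and kvm->arch.aliases enter a sleepable read-side critical section instead of taking the rwsem, and the index returned by the lock must be handed back to the matching unlock. The minimal idiom, using the real SRCU API:

	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	/* ... dereference SRCU-protected pointers via rcu_dereference() ... */
	srcu_read_unlock(&kvm->srcu, idx);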
 
@@ -1395,6 +1566,9 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_XEN_HVM:
        case KVM_CAP_ADJUST_CLOCK:
        case KVM_CAP_VCPU_EVENTS:
+       case KVM_CAP_HYPERV:
+       case KVM_CAP_HYPERV_VAPIC:
+       case KVM_CAP_HYPERV_SPIN:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
@@ -1508,8 +1682,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-       kvm_x86_ops->vcpu_put(vcpu);
        kvm_put_guest_fpu(vcpu);
+       kvm_x86_ops->vcpu_put(vcpu);
 }
 
 static int is_efer_nx(void)
@@ -1640,10 +1814,12 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                         u32 index, int *nent, int maxnent)
 {
        unsigned f_nx = is_efer_nx() ? F(NX) : 0;
-       unsigned f_gbpages = kvm_x86_ops->gb_page_enable() ? F(GBPAGES) : 0;
 #ifdef CONFIG_X86_64
+       unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
+                               ? F(GBPAGES) : 0;
        unsigned f_lm = F(LM);
 #else
+       unsigned f_gbpages = 0;
        unsigned f_lm = 0;
 #endif
        unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
@@ -2207,14 +2383,14 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;
 
-       down_write(&kvm->slots_lock);
+       mutex_lock(&kvm->slots_lock);
        spin_lock(&kvm->mmu_lock);
 
        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
        kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
        spin_unlock(&kvm->mmu_lock);
-       up_write(&kvm->slots_lock);
+       mutex_unlock(&kvm->slots_lock);
        return 0;
 }
 
@@ -2223,13 +2399,35 @@ static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
        return kvm->arch.n_alloc_mmu_pages;
 }
 
+gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
+{
+       int i;
+       struct kvm_mem_alias *alias;
+       struct kvm_mem_aliases *aliases;
+
+       aliases = rcu_dereference(kvm->arch.aliases);
+
+       for (i = 0; i < aliases->naliases; ++i) {
+               alias = &aliases->aliases[i];
+               if (alias->flags & KVM_ALIAS_INVALID)
+                       continue;
+               if (gfn >= alias->base_gfn
+                   && gfn < alias->base_gfn + alias->npages)
+                       return alias->target_gfn + gfn - alias->base_gfn;
+       }
+       return gfn;
+}
+
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 {
        int i;
        struct kvm_mem_alias *alias;
+       struct kvm_mem_aliases *aliases;
+
+       aliases = rcu_dereference(kvm->arch.aliases);
 
-       for (i = 0; i < kvm->arch.naliases; ++i) {
-               alias = &kvm->arch.aliases[i];
+       for (i = 0; i < aliases->naliases; ++i) {
+               alias = &aliases->aliases[i];
                if (gfn >= alias->base_gfn
                    && gfn < alias->base_gfn + alias->npages)
                        return alias->target_gfn + gfn - alias->base_gfn;
@@ -2247,6 +2445,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 {
        int r, n;
        struct kvm_mem_alias *p;
+       struct kvm_mem_aliases *aliases, *old_aliases;
 
        r = -EINVAL;
        /* General sanity checks */
@@ -2263,26 +2462,48 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
            < alias->target_phys_addr)
                goto out;
 
-       down_write(&kvm->slots_lock);
-       spin_lock(&kvm->mmu_lock);
+       r = -ENOMEM;
+       aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+       if (!aliases)
+               goto out;
+
+       mutex_lock(&kvm->slots_lock);
 
-       p = &kvm->arch.aliases[alias->slot];
+       /* invalidate any gfn reference in case of deletion/shrinking */
+       memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
+       aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
+       old_aliases = kvm->arch.aliases;
+       rcu_assign_pointer(kvm->arch.aliases, aliases);
+       synchronize_srcu_expedited(&kvm->srcu);
+       kvm_mmu_zap_all(kvm);
+       kfree(old_aliases);
+
+       r = -ENOMEM;
+       aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+       if (!aliases)
+               goto out_unlock;
+
+       memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
+
+       p = &aliases->aliases[alias->slot];
        p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
        p->npages = alias->memory_size >> PAGE_SHIFT;
        p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
+       p->flags &= ~(KVM_ALIAS_INVALID);
 
        for (n = KVM_ALIAS_SLOTS; n > 0; --n)
-               if (kvm->arch.aliases[n - 1].npages)
+               if (aliases->aliases[n - 1].npages)
                        break;
-       kvm->arch.naliases = n;
-
-       spin_unlock(&kvm->mmu_lock);
-       kvm_mmu_zap_all(kvm);
-
-       up_write(&kvm->slots_lock);
+       aliases->naliases = n;
 
-       return 0;
+       old_aliases = kvm->arch.aliases;
+       rcu_assign_pointer(kvm->arch.aliases, aliases);
+       synchronize_srcu_expedited(&kvm->srcu);
+       kfree(old_aliases);
+       r = 0;
 
+out_unlock:
+       mutex_unlock(&kvm->slots_lock);
 out:
        return r;
 }
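
The alias update is the writer-side counterpart: the classic copy/publish/synchronize pattern, performed twice, first to mark the old alias KVM_ALIAS_INVALID before zapping shadow pages, then to install the new table. Condensed (variable names shortened):

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	memcpy(new, kvm->arch.aliases, sizeof(*new));
	/* ... edit 'new' ... */
	old = kvm->arch.aliases;
	rcu_assign_pointer(kvm->arch.aliases, new);
	synchronize_srcu_expedited(&kvm->srcu);	/* wait out readers of 'old' */
	kfree(old);
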
@@ -2411,29 +2632,62 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                      struct kvm_dirty_log *log)
 {
-       int r;
-       int n;
+       int r, n, i;
        struct kvm_memory_slot *memslot;
-       int is_dirty = 0;
+       unsigned long is_dirty = 0;
+       unsigned long *dirty_bitmap = NULL;
 
-       down_write(&kvm->slots_lock);
+       mutex_lock(&kvm->slots_lock);
 
-       r = kvm_get_dirty_log(kvm, log, &is_dirty);
-       if (r)
+       r = -EINVAL;
+       if (log->slot >= KVM_MEMORY_SLOTS)
+               goto out;
+
+       memslot = &kvm->memslots->memslots[log->slot];
+       r = -ENOENT;
+       if (!memslot->dirty_bitmap)
+               goto out;
+
+       n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+
+       r = -ENOMEM;
+       dirty_bitmap = vmalloc(n);
+       if (!dirty_bitmap)
                goto out;
+       memset(dirty_bitmap, 0, n);
+
+       for (i = 0; !is_dirty && i < n/sizeof(long); i++)
+               is_dirty = memslot->dirty_bitmap[i];
 
        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
+               struct kvm_memslots *slots, *old_slots;
+
                spin_lock(&kvm->mmu_lock);
                kvm_mmu_slot_remove_write_access(kvm, log->slot);
                spin_unlock(&kvm->mmu_lock);
-               memslot = &kvm->memslots->memslots[log->slot];
-               n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
-               memset(memslot->dirty_bitmap, 0, n);
+
+               slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+               if (!slots)
+                       goto out_free;
+
+               memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+               slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
+
+               old_slots = kvm->memslots;
+               rcu_assign_pointer(kvm->memslots, slots);
+               synchronize_srcu_expedited(&kvm->srcu);
+               dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
+               kfree(old_slots);
        }
+
        r = 0;
+       if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
+               r = -EFAULT;
+out_free:
+       vfree(dirty_bitmap);
 out:
-       up_write(&kvm->slots_lock);
+       mutex_unlock(&kvm->slots_lock);
        return r;
 }
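
Clearing the live bitmap in place raced against vcpus setting bits, so the slot's dirty_bitmap pointer is instead swapped for a freshly zeroed one under the same publish/synchronize scheme; the retired bitmap, guaranteed quiescent after the grace period, is what gets copied out. In outline (names abbreviated):

	slots->memslots[slot].dirty_bitmap = zeroed_bitmap;
	old_slots = kvm->memslots;
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);
	/* no vcpu can still be writing the old bitmap */
	copy_to_user(log->dirty_bitmap,
		     old_slots->memslots[slot].dirty_bitmap, n);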
 
@@ -2546,7 +2800,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
                                   sizeof(struct kvm_pit_config)))
                        goto out;
        create_pit:
-               down_write(&kvm->slots_lock);
+               mutex_lock(&kvm->slots_lock);
                r = -EEXIST;
                if (kvm->arch.vpit)
                        goto create_pit_unlock;
@@ -2555,7 +2809,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
                if (kvm->arch.vpit)
                        r = 0;
        create_pit_unlock:
-               up_write(&kvm->slots_lock);
+               mutex_unlock(&kvm->slots_lock);
                break;
        case KVM_IRQ_LINE_STATUS:
        case KVM_IRQ_LINE: {
@@ -2772,7 +3026,7 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
            !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
                return 0;
 
-       return kvm_io_bus_write(&vcpu->kvm->mmio_bus, addr, len, v);
+       return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
 }
 
 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
@@ -2781,7 +3035,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
            !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
                return 0;
 
-       return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
+       return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
 }
 
 static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
@@ -3014,35 +3268,21 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
-       kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
+       kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+       kvm_x86_ops->fpu_activate(vcpu);
        return X86EMUL_CONTINUE;
 }
 
 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 {
-       struct kvm_vcpu *vcpu = ctxt->vcpu;
-
-       switch (dr) {
-       case 0 ... 3:
-               *dest = kvm_x86_ops->get_dr(vcpu, dr);
-               return X86EMUL_CONTINUE;
-       default:
-               pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
-               return X86EMUL_UNHANDLEABLE;
-       }
+       return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
 }
 
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
 {
        unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
-       int exception;
 
-       kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
-       if (exception) {
-               /* FIXME: better handling */
-               return X86EMUL_UNHANDLEABLE;
-       }
-       return X86EMUL_CONTINUE;
+       return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
 }
 
 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
@@ -3266,11 +3506,12 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
        int r;
 
        if (vcpu->arch.pio.in)
-               r = kvm_io_bus_read(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
+               r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
                                    vcpu->arch.pio.size, pd);
        else
-               r = kvm_io_bus_write(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
-                                    vcpu->arch.pio.size, pd);
+               r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
+                                    vcpu->arch.pio.port, vcpu->arch.pio.size,
+                                    pd);
        return r;
 }
 
@@ -3281,7 +3522,7 @@ static int pio_string_write(struct kvm_vcpu *vcpu)
        int i, r = 0;
 
        for (i = 0; i < io->cur_count; i++) {
-               if (kvm_io_bus_write(&vcpu->kvm->pio_bus,
+               if (kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
                                     io->port, io->size, pd)) {
                        r = -EOPNOTSUPP;
                        break;
@@ -3534,11 +3775,76 @@ static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
                return a0 | ((gpa_t)a1 << 32);
 }
 
+int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
+{
+       u64 param, ingpa, outgpa, ret;
+       uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
+       bool fast, longmode;
+       int cs_db, cs_l;
+
+       /*
+        * a hypercall generates #UD from non-zero CPL or real mode,
+        * per the Hyper-V spec
+        */
+       if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
+               kvm_queue_exception(vcpu, UD_VECTOR);
+               return 0;
+       }
+
+       kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+       longmode = is_long_mode(vcpu) && cs_l == 1;
+
+       if (!longmode) {
+               param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
+                       (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
+               ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
+                       (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
+               outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
+                       (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
+       }
+#ifdef CONFIG_X86_64
+       else {
+               param = kvm_register_read(vcpu, VCPU_REGS_RCX);
+               ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
+               outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
+       }
+#endif
+
+       code = param & 0xffff;
+       fast = (param >> 16) & 0x1;
+       rep_cnt = (param >> 32) & 0xfff;
+       rep_idx = (param >> 48) & 0xfff;
+
+       trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
+
+       switch (code) {
+       case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
+               kvm_vcpu_on_spin(vcpu);
+               break;
+       default:
+               res = HV_STATUS_INVALID_HYPERCALL_CODE;
+               break;
+       }
+
+       ret = res | (((u64)rep_done & 0xfff) << 32);
+       if (longmode) {
+               kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+       } else {
+               kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
+               kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
+       }
+
+       return 1;
+}
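
The 64-bit hypercall input value packs the call code, fast-call flag, and rep fields exactly as the shifts above unpack them. A standalone user-space decoder for illustration (the input value is made up):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t param = 0x0003000500010008ULL;	/* arbitrary example */
		uint16_t code    = param & 0xffff;
		int      fast    = (param >> 16) & 0x1;
		uint16_t rep_cnt = (param >> 32) & 0xfff;
		uint16_t rep_idx = (param >> 48) & 0xfff;

		/* prints: code=8 fast=1 rep_cnt=5 rep_idx=3 */
		printf("code=%u fast=%d rep_cnt=%u rep_idx=%u\n",
		       code, fast, rep_cnt, rep_idx);
		return 0;
	}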
+
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
        unsigned long nr, a0, a1, a2, a3, ret;
        int r = 1;
 
+       if (kvm_hv_hypercall_enabled(vcpu->kvm))
+               return kvm_hv_hypercall(vcpu);
+
        nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
        a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
        a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
@@ -3632,7 +3938,7 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 
        switch (cr) {
        case 0:
-               value = vcpu->arch.cr0;
+               value = kvm_read_cr0(vcpu);
                break;
        case 2:
                value = vcpu->arch.cr2;
@@ -3659,7 +3965,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
 {
        switch (cr) {
        case 0:
-               kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
+               kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
                *rflags = kvm_get_rflags(vcpu);
                break;
        case 2:
@@ -3820,14 +4126,15 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
 static void vapic_exit(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
+       int idx;
 
        if (!apic || !apic->vapic_addr)
                return;
 
-       down_read(&vcpu->kvm->slots_lock);
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
        kvm_release_page_dirty(apic->vapic_page);
        mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-       up_read(&vcpu->kvm->slots_lock);
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -3923,12 +4230,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        r = 0;
                        goto out;
                }
+               if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) {
+                       vcpu->fpu_active = 0;
+                       kvm_x86_ops->fpu_deactivate(vcpu);
+               }
        }
 
        preempt_disable();
 
        kvm_x86_ops->prepare_guest_switch(vcpu);
-       kvm_load_guest_fpu(vcpu);
+       if (vcpu->fpu_active)
+               kvm_load_guest_fpu(vcpu);
 
        local_irq_disable();
 
@@ -3956,7 +4268,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                kvm_lapic_sync_to_vapic(vcpu);
        }
 
-       up_read(&vcpu->kvm->slots_lock);
+       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
        kvm_guest_enter();
 
@@ -3998,7 +4310,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        preempt_enable();
 
-       down_read(&vcpu->kvm->slots_lock);
+       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
        /*
         * Profile KVM exit RIPs:
@@ -4020,6 +4332,7 @@ out:
 static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
        int r;
+       struct kvm *kvm = vcpu->kvm;
 
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
                pr_debug("vcpu %d received sipi with vector # %x\n",
@@ -4031,7 +4344,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        }
 
-       down_read(&vcpu->kvm->slots_lock);
+       vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
        vapic_enter(vcpu);
 
        r = 1;
@@ -4039,9 +4352,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
                        r = vcpu_enter_guest(vcpu);
                else {
-                       up_read(&vcpu->kvm->slots_lock);
+                       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
                        kvm_vcpu_block(vcpu);
-                       down_read(&vcpu->kvm->slots_lock);
+                       vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
                        if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
                        {
                                switch(vcpu->arch.mp_state) {
@@ -4076,13 +4389,13 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                        ++vcpu->stat.signal_exits;
                }
                if (need_resched()) {
-                       up_read(&vcpu->kvm->slots_lock);
+                       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
                        kvm_resched(vcpu);
-                       down_read(&vcpu->kvm->slots_lock);
+                       vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
                }
        }
 
-       up_read(&vcpu->kvm->slots_lock);
+       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
        post_kvm_run_save(vcpu);
 
        vapic_exit(vcpu);
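
A rule worth calling out in __vcpu_run: although SRCU readers are allowed to sleep, a reader parked indefinitely in kvm_vcpu_block() would stall every synchronize_srcu_expedited() writer above, so the read section is dropped and re-entered around every point that can block for long (halt, resched, and the guest-entry window itself):

	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);			/* may sleep for a long time */
	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
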
@@ -4121,10 +4434,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                vcpu->mmio_read_completed = 1;
                vcpu->mmio_needed = 0;
 
-               down_read(&vcpu->kvm->slots_lock);
+               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
                                        EMULTYPE_NO_DECODE);
-               up_read(&vcpu->kvm->slots_lock);
+               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                if (r == EMULATE_DO_MMIO) {
                        /*
                         * Read-modify-write.  Back to userspace.
@@ -4251,12 +4564,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        sregs->gdt.limit = dt.limit;
        sregs->gdt.base = dt.base;
 
-       sregs->cr0 = vcpu->arch.cr0;
+       sregs->cr0 = kvm_read_cr0(vcpu);
        sregs->cr2 = vcpu->arch.cr2;
        sregs->cr3 = vcpu->arch.cr3;
        sregs->cr4 = kvm_read_cr4(vcpu);
        sregs->cr8 = kvm_get_cr8(vcpu);
-       sregs->efer = vcpu->arch.shadow_efer;
+       sregs->efer = vcpu->arch.efer;
        sregs->apic_base = kvm_get_apic_base(vcpu);
 
        memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
@@ -4437,7 +4750,7 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 {
        struct kvm_segment kvm_seg;
 
-       if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE))
+       if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
                return kvm_load_realmode_segment(vcpu, selector, seg);
        if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
                return 1;
@@ -4715,7 +5028,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
                                              &nseg_desc);
        }
 
-       kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
+       kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0(vcpu) | X86_CR0_TS);
        seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
        tr_seg.type = 11;
        kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
@@ -4746,11 +5059,11 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        kvm_set_cr8(vcpu, sregs->cr8);
 
-       mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
+       mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
        kvm_set_apic_base(vcpu, sregs->apic_base);
 
-       mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
+       mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
        kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
        vcpu->arch.cr0 = sregs->cr0;
 
@@ -4789,7 +5102,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        /* Older userspace won't unhalt the vcpu on reset. */
        if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
            sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
-           !(vcpu->arch.cr0 & X86_CR0_PE))
+           !is_protmode(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
        vcpu_put(vcpu);
@@ -4887,11 +5200,12 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 {
        unsigned long vaddr = tr->linear_address;
        gpa_t gpa;
+       int idx;
 
        vcpu_load(vcpu);
-       down_read(&vcpu->kvm->slots_lock);
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
-       up_read(&vcpu->kvm->slots_lock);
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
        tr->writeable = 1;
@@ -4972,14 +5286,13 @@ EXPORT_SYMBOL_GPL(fx_init);
 
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
+       if (vcpu->guest_fpu_loaded)
                return;
 
        vcpu->guest_fpu_loaded = 1;
        kvm_fx_save(&vcpu->arch.host_fx_image);
        kvm_fx_restore(&vcpu->arch.guest_fx_image);
 }
-EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
 
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
@@ -4990,8 +5303,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
        kvm_fx_save(&vcpu->arch.guest_fx_image);
        kvm_fx_restore(&vcpu->arch.host_fx_image);
        ++vcpu->stat.fpu_reload;
+       set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
 }
-EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
@@ -5143,11 +5456,13 @@ fail:
 
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
+       int idx;
+
        kfree(vcpu->arch.mce_banks);
        kvm_free_lapic(vcpu);
-       down_read(&vcpu->kvm->slots_lock);
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
        kvm_mmu_destroy(vcpu);
-       up_read(&vcpu->kvm->slots_lock);
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
        free_page((unsigned long)vcpu->arch.pio_data);
 }
 
@@ -5158,6 +5473,12 @@ struct  kvm *kvm_arch_create_vm(void)
        if (!kvm)
                return ERR_PTR(-ENOMEM);
 
+       kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+       if (!kvm->arch.aliases) {
+               kfree(kvm);
+               return ERR_PTR(-ENOMEM);
+       }
+
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
@@ -5214,16 +5535,18 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                put_page(kvm->arch.apic_access_page);
        if (kvm->arch.ept_identity_pagetable)
                put_page(kvm->arch.ept_identity_pagetable);
+       cleanup_srcu_struct(&kvm->srcu);
+       kfree(kvm->arch.aliases);
        kfree(kvm);
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-                               struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                               struct kvm_memory_slot *memslot,
                                struct kvm_memory_slot old,
+                               struct kvm_userspace_memory_region *mem,
                                int user_alloc)
 {
-       int npages = mem->memory_size >> PAGE_SHIFT;
-       struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot];
+       int npages = memslot->npages;
 
        /* To keep backward compatibility with older userspace,
         * x86 needs to handle the !user_alloc case.
@@ -5243,26 +5566,35 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                        if (IS_ERR((void *)userspace_addr))
                                return PTR_ERR((void *)userspace_addr);
 
-                       /* set userspace_addr atomically for kvm_hva_to_rmapp */
-                       spin_lock(&kvm->mmu_lock);
                        memslot->userspace_addr = userspace_addr;
-                       spin_unlock(&kvm->mmu_lock);
-               } else {
-                       if (!old.user_alloc && old.rmap) {
-                               int ret;
-
-                               down_write(&current->mm->mmap_sem);
-                               ret = do_munmap(current->mm, old.userspace_addr,
-                                               old.npages * PAGE_SIZE);
-                               up_write(&current->mm->mmap_sem);
-                               if (ret < 0)
-                                       printk(KERN_WARNING
-                                      "kvm_vm_ioctl_set_memory_region: "
-                                      "failed to munmap memory\n");
-                       }
                }
        }
 
+
+       return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                               struct kvm_userspace_memory_region *mem,
+                               struct kvm_memory_slot old,
+                               int user_alloc)
+{
+
+       int npages = mem->memory_size >> PAGE_SHIFT;
+
+       if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
+               int ret;
+
+               down_write(&current->mm->mmap_sem);
+               ret = do_munmap(current->mm, old.userspace_addr,
+                               old.npages * PAGE_SIZE);
+               up_write(&current->mm->mmap_sem);
+               if (ret < 0)
+                       printk(KERN_WARNING
+                              "kvm_vm_ioctl_set_memory_region: "
+                              "failed to munmap memory\n");
+       }
+
        spin_lock(&kvm->mmu_lock);
        if (!kvm->arch.n_requested_mmu_pages) {
                unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
@@ -5271,8 +5603,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 
        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        spin_unlock(&kvm->mmu_lock);
-
-       return 0;
 }
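
The arch hook is split so generic code can publish the new memslots between the two halves: prepare may fail before anything is visible, commit runs after the SRCU grace period and must not fail. Roughly, the caller in virt/kvm/kvm_main.c now sequences (a sketch, not verbatim):

	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;				/* nothing published yet */

	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);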
 
 void kvm_arch_flush_shadow(struct kvm *kvm)