Merge branch 'kvm-updates/2.6.38' of git://git.kernel.org/pub/scm/virt/kvm/kvm
authorLinus Torvalds <torvalds@linux-foundation.org>
Thu, 13 Jan 2011 18:14:24 +0000 (10:14 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 13 Jan 2011 18:14:24 +0000 (10:14 -0800)
* 'kvm-updates/2.6.38' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (142 commits)
  KVM: Initialize fpu state in preemptible context
  KVM: VMX: when entering real mode align segment base to 16 bytes
  KVM: MMU: handle 'map_writable' in set_spte() function
  KVM: MMU: audit: allow audit more guests at the same time
  KVM: Fetch guest cr3 from hardware on demand
  KVM: Replace reads of vcpu->arch.cr3 by an accessor
  KVM: MMU: only write protect mappings at pagetable level
  KVM: VMX: Correct asm constraint in vmcs_load()/vmcs_clear()
  KVM: MMU: Initialize base_role for tdp mmus
  KVM: VMX: Optimize atomic EFER load
  KVM: VMX: Add definitions for more vm entry/exit control bits
  KVM: SVM: copy instruction bytes from VMCB
  KVM: SVM: implement enhanced INVLPG intercept
  KVM: SVM: enhance mov DR intercept handler
  KVM: SVM: enhance MOV CR intercept handler
  KVM: SVM: add new SVM feature bit names
  KVM: cleanup emulate_instruction
  KVM: move complete_insn_gp() into x86.c
  KVM: x86: fix CR8 handling
  KVM guest: Fix kvm clock initialization when it's configured out
  ...

1  2 
Documentation/kernel-parameters.txt
Documentation/kvm/api.txt
arch/x86/kernel/entry_64.S
arch/x86/kvm/x86.c

@@@ -403,10 -403,6 +403,10 @@@ and is between 256 and 4096 characters
        bttv.pll=       See Documentation/video4linux/bttv/Insmod-options
        bttv.tuner=     and Documentation/video4linux/bttv/CARDLIST
  
 +      bulk_remove=off [PPC]  This parameter disables the use of the pSeries
 +                      firmware feature for flushing multiple hpte entries
 +                      at a time.
 +
        c101=           [NET] Moxa C101 synchronous serial card
  
        cachesize=      [BUGS=X86-32] Override level 2 CPU cache size detection.
  
        dscc4.setup=    [NET]
  
 -      dynamic_printk  Enables pr_debug()/dev_dbg() calls if
 -                      CONFIG_DYNAMIC_PRINTK_DEBUG has been enabled.
 -                      These can also be switched on/off via
 -                      <debugfs>/dynamic_printk/modules
 -
        earlycon=       [KNL] Output early console device and options.
                uart[8250],io,<addr>[,options]
                uart[8250],mmio,<addr>[,options]
                             controller
        i8042.nopnp     [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
                             controllers
 +      i8042.notimeout [HW] Ignore timeout condition signalled by controller
        i8042.reset     [HW] Reset the controller during init and cleanup
        i8042.unlock    [HW] Unlock (ignore) the keylock
  
        mtdparts=       [MTD]
                        See drivers/mtd/cmdlinepart.c.
  
 +      multitce=off    [PPC]  This parameter disables the use of the pSeries
 +                      firmware feature for updating multiple TCE entries
 +                      at a time.
 +
        onenand.bdry=   [HW,MTD] Flex-OneNAND Boundary Configuration
  
                        Format: [die0_boundary][,die0_lock][,die1_boundary][,die1_lock]
  
        nmi_watchdog=   [KNL,BUGS=X86] Debugging features for SMP kernels
                        Format: [panic,][num]
 -                      Valid num: 0,1,2
 +                      Valid num: 0
                        0 - turn nmi_watchdog off
 -                      1 - use the IO-APIC timer for the NMI watchdog
 -                      2 - use the local APIC for the NMI watchdog using
 -                      a performance counter. Note: This will use one
 -                      performance counter and the local APIC's performance
 -                      vector.
                        When panic is specified, panic when an NMI watchdog
                        timeout occurs.
                        This is useful when you use a panic=... timeout and
                        need the box quickly up again.
 -                      Instead of 1 and 2 it is possible to use the following
 -                      symbolic names: lapic and ioapic
 -                      Example: nmi_watchdog=2 or nmi_watchdog=panic,lapic
  
        netpoll.carrier_timeout=
                        [NET] Specifies amount of time (in seconds) that
        noapic          [SMP,APIC] Tells the kernel to not make use of any
                        IOAPICs that may be present in the system.
  
 +      noautogroup     Disable scheduler automatic task group creation.
 +
        nobats          [PPC] Do not use BATs for mapping kernel lowmem
                        on "Classic" PPC cores.
  
  
        no-kvmclock     [X86,KVM] Disable paravirtualized KVM clock driver
  
+       no-kvmapf       [X86,KVM] Disable paravirtualized asynchronous page
+                       fault handling.
        nolapic         [X86-32,APIC] Do not enable or use the local APIC.
  
        nolapic_timer   [X86-32,APIC] Do not use the local APIC timer.
  
        nousb           [USB] Disable the USB subsystem
  
 -      nowatchdog      [KNL] Disable the lockup detector.
 +      nowatchdog      [KNL] Disable the lockup detector (NMI watchdog).
  
        nowb            [ARM]
  
                        to facilitate early boot debugging.
                        See also Documentation/trace/events.txt
  
 -      tsc=            Disable clocksource-must-verify flag for TSC.
 +      tsc=            Disable clocksource stability checks for TSC.
                        Format: <string>
                        [x86] reliable: mark tsc clocksource as reliable, this
 -                      disables clocksource verification at runtime.
 -                      Used to enable high-resolution timer mode on older
 -                      hardware, and in virtualized environment.
 +                      disables clocksource verification at runtime, as well
 +                      as the stability checks done at bootup. Used to enable
 +                      high-resolution timer mode on older hardware, and in
 +                      virtualized environment.
                        [x86] noirqtime: Do not use TSC to do irq accounting.
                        Used to run time disable IRQ_TIME_ACCOUNTING on any
                        platforms where RDTSC is slow and this accounting
@@@ -874,7 -874,7 +874,7 @@@ Possible values are
   - KVM_MP_STATE_HALTED:          the vcpu has executed a HLT instruction and
                                   is waiting for an interrupt
   - KVM_MP_STATE_SIPI_RECEIVED:   the vcpu has just received a SIPI (vector
 -                                 accesible via KVM_GET_VCPU_EVENTS)
 +                                 accessible via KVM_GET_VCPU_EVENTS)
  
  This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
  irqchip, the multiprocessing state must be maintained by userspace.
@@@ -1085,6 -1085,184 +1085,184 @@@ of 4 instructions that make up a hyperc
  If any additional field gets added to this structure later on, a bit for that
  additional piece of information will be set in the flags bitmap.
  
+ 4.47 KVM_ASSIGN_PCI_DEVICE
+ Capability: KVM_CAP_DEVICE_ASSIGNMENT
+ Architectures: x86 ia64
+ Type: vm ioctl
+ Parameters: struct kvm_assigned_pci_dev (in)
+ Returns: 0 on success, -1 on error
+ Assigns a host PCI device to the VM.
+ struct kvm_assigned_pci_dev {
+       __u32 assigned_dev_id;
+       __u32 busnr;
+       __u32 devfn;
+       __u32 flags;
+       __u32 segnr;
+       union {
+               __u32 reserved[11];
+       };
+ };
+ The PCI device is specified by the triple segnr, busnr, and devfn.
+ Identification in succeeding service requests is done via assigned_dev_id. The
+ following flags are specified:
+ /* Depends on KVM_CAP_IOMMU */
+ #define KVM_DEV_ASSIGN_ENABLE_IOMMU   (1 << 0)
+ 4.48 KVM_DEASSIGN_PCI_DEVICE
+ Capability: KVM_CAP_DEVICE_DEASSIGNMENT
+ Architectures: x86 ia64
+ Type: vm ioctl
+ Parameters: struct kvm_assigned_pci_dev (in)
+ Returns: 0 on success, -1 on error
+ Ends PCI device assignment, releasing all associated resources.
+ See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
+ used in kvm_assigned_pci_dev to identify the device.
+ 4.49 KVM_ASSIGN_DEV_IRQ
+ Capability: KVM_CAP_ASSIGN_DEV_IRQ
+ Architectures: x86 ia64
+ Type: vm ioctl
+ Parameters: struct kvm_assigned_irq (in)
+ Returns: 0 on success, -1 on error
+ Assigns an IRQ to a passed-through device.
+ struct kvm_assigned_irq {
+       __u32 assigned_dev_id;
+       __u32 host_irq;
+       __u32 guest_irq;
+       __u32 flags;
+       union {
+               struct {
+                       __u32 addr_lo;
+                       __u32 addr_hi;
+                       __u32 data;
+               } guest_msi;
+               __u32 reserved[12];
+       };
+ };
+ The following flags are defined:
+ #define KVM_DEV_IRQ_HOST_INTX    (1 << 0)
+ #define KVM_DEV_IRQ_HOST_MSI     (1 << 1)
+ #define KVM_DEV_IRQ_HOST_MSIX    (1 << 2)
+ #define KVM_DEV_IRQ_GUEST_INTX   (1 << 8)
+ #define KVM_DEV_IRQ_GUEST_MSI    (1 << 9)
+ #define KVM_DEV_IRQ_GUEST_MSIX   (1 << 10)
+ It is not valid to specify multiple types per host or guest IRQ. However, the
+ IRQ type of host and guest can differ or can even be null.
+ 4.50 KVM_DEASSIGN_DEV_IRQ
+ Capability: KVM_CAP_ASSIGN_DEV_IRQ
+ Architectures: x86 ia64
+ Type: vm ioctl
+ Parameters: struct kvm_assigned_irq (in)
+ Returns: 0 on success, -1 on error
+ Ends an IRQ assignment to a passed-through device.
+ See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
+ by assigned_dev_id, flags must correspond to the IRQ type specified on
+ KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
+ 4.51 KVM_SET_GSI_ROUTING
+ Capability: KVM_CAP_IRQ_ROUTING
+ Architectures: x86 ia64
+ Type: vm ioctl
+ Parameters: struct kvm_irq_routing (in)
+ Returns: 0 on success, -1 on error
+ Sets the GSI routing table entries, overwriting any previously set entries.
+ struct kvm_irq_routing {
+       __u32 nr;
+       __u32 flags;
+       struct kvm_irq_routing_entry entries[0];
+ };
+ No flags are specified so far, the corresponding field must be set to zero.
+ struct kvm_irq_routing_entry {
+       __u32 gsi;
+       __u32 type;
+       __u32 flags;
+       __u32 pad;
+       union {
+               struct kvm_irq_routing_irqchip irqchip;
+               struct kvm_irq_routing_msi msi;
+               __u32 pad[8];
+       } u;
+ };
+ /* gsi routing entry types */
+ #define KVM_IRQ_ROUTING_IRQCHIP 1
+ #define KVM_IRQ_ROUTING_MSI 2
+ No flags are specified so far, the corresponding field must be set to zero.
+ struct kvm_irq_routing_irqchip {
+       __u32 irqchip;
+       __u32 pin;
+ };
+ struct kvm_irq_routing_msi {
+       __u32 address_lo;
+       __u32 address_hi;
+       __u32 data;
+       __u32 pad;
+ };
+ 4.52 KVM_ASSIGN_SET_MSIX_NR
+ Capability: KVM_CAP_DEVICE_MSIX
+ Architectures: x86 ia64
+ Type: vm ioctl
+ Parameters: struct kvm_assigned_msix_nr (in)
+ Returns: 0 on success, -1 on error
+ Set the number of MSI-X interrupts for an assigned device. This service can
+ only be called once in the lifetime of an assigned device.
+ struct kvm_assigned_msix_nr {
+       __u32 assigned_dev_id;
+       __u16 entry_nr;
+       __u16 padding;
+ };
+ #define KVM_MAX_MSIX_PER_DEV          256
+ 4.53 KVM_ASSIGN_SET_MSIX_ENTRY
+ Capability: KVM_CAP_DEVICE_MSIX
+ Architectures: x86 ia64
+ Type: vm ioctl
+ Parameters: struct kvm_assigned_msix_entry (in)
+ Returns: 0 on success, -1 on error
+ Specifies the routing of an MSI-X assigned device interrupt to a GSI. Setting
+ the GSI vector to zero means disabling the interrupt.
+ struct kvm_assigned_msix_entry {
+       __u32 assigned_dev_id;
+       __u32 gsi;
+       __u16 entry; /* The index of entry in the MSI-X table */
+       __u16 padding[3];
+ };
  5. The kvm_run structure
  
  Application code obtains a pointer to the kvm_run structure by
@@@ -299,21 -299,17 +299,21 @@@ ENDPROC(native_usergs_sysret64
  ENTRY(save_args)
        XCPT_FRAME
        cld
 -      movq_cfi rdi, RDI+16-ARGOFFSET
 -      movq_cfi rsi, RSI+16-ARGOFFSET
 -      movq_cfi rdx, RDX+16-ARGOFFSET
 -      movq_cfi rcx, RCX+16-ARGOFFSET
 -      movq_cfi rax, RAX+16-ARGOFFSET
 -      movq_cfi  r8,  R8+16-ARGOFFSET
 -      movq_cfi  r9,  R9+16-ARGOFFSET
 -      movq_cfi r10, R10+16-ARGOFFSET
 -      movq_cfi r11, R11+16-ARGOFFSET
 -
 -      leaq -ARGOFFSET+16(%rsp),%rdi   /* arg1 for handler */
 +      /*
 +       * start from rbp in pt_regs and jump over
 +       * return address.
 +       */
 +      movq_cfi rdi, RDI+8-RBP
 +      movq_cfi rsi, RSI+8-RBP
 +      movq_cfi rdx, RDX+8-RBP
 +      movq_cfi rcx, RCX+8-RBP
 +      movq_cfi rax, RAX+8-RBP
 +      movq_cfi  r8,  R8+8-RBP
 +      movq_cfi  r9,  R9+8-RBP
 +      movq_cfi r10, R10+8-RBP
 +      movq_cfi r11, R11+8-RBP
 +
 +      leaq -RBP+8(%rsp),%rdi  /* arg1 for handler */
        movq_cfi rbp, 8         /* push %rbp */
        leaq 8(%rsp), %rbp              /* mov %rsp, %ebp */
        testl $3, CS(%rdi)
@@@ -786,9 -782,8 +786,9 @@@ END(interrupt
  
  /* 0(%rsp): ~(interrupt number) */
        .macro interrupt func
 -      subq $ORIG_RAX-ARGOFFSET+8, %rsp
 -      CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+8
 +      /* reserve pt_regs for scratch regs and rbp */
 +      subq $ORIG_RAX-RBP, %rsp
 +      CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
        call save_args
        PARTIAL_FRAME 0
        call \func
@@@ -813,14 -808,9 +813,14 @@@ ret_from_intr
        TRACE_IRQS_OFF
        decl PER_CPU_VAR(irq_count)
        leaveq
 +
        CFI_RESTORE             rbp
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
 +
 +      /* we did not save rbx, restore only from ARGOFFSET */
 +      addq $8, %rsp
 +      CFI_ADJUST_CFA_OFFSET   -8
  exit_intr:
        GET_THREAD_INFO(%rcx)
        testl $3,CS-ARGOFFSET(%rsp)
@@@ -1329,6 -1319,9 +1329,9 @@@ errorentry xen_stack_segment do_stack_s
  #endif
  errorentry general_protection do_general_protection
  errorentry page_fault do_page_fault
+ #ifdef CONFIG_KVM_GUEST
+ errorentry async_page_fault do_async_page_fault
+ #endif
  #ifdef CONFIG_X86_MCE
  paranoidzeroentry machine_check *machine_check_vector(%rip)
  #endif
diff --combined arch/x86/kvm/x86.c
@@@ -43,6 -43,7 +43,7 @@@
  #include <linux/slab.h>
  #include <linux/perf_event.h>
  #include <linux/uaccess.h>
+ #include <linux/hash.h>
  #include <trace/events/kvm.h>
  
  #define CREATE_TRACE_POINTS
@@@ -155,6 -156,13 +156,13 @@@ struct kvm_stats_debugfs_item debugfs_e
  
  u64 __read_mostly host_xcr0;
  
+ static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
+ {
+       int i;
+       for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
+               vcpu->arch.apf.gfns[i] = ~0;
+ }
  static void kvm_on_user_return(struct user_return_notifier *urn)
  {
        unsigned slot;
@@@ -326,23 -334,28 +334,28 @@@ void kvm_requeue_exception(struct kvm_v
  }
  EXPORT_SYMBOL_GPL(kvm_requeue_exception);
  
- void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
+ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
  {
-       unsigned error_code = vcpu->arch.fault.error_code;
+       if (err)
+               kvm_inject_gp(vcpu, 0);
+       else
+               kvm_x86_ops->skip_emulated_instruction(vcpu);
+ }
+ EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
  
+ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
+ {
        ++vcpu->stat.pf_guest;
-       vcpu->arch.cr2 = vcpu->arch.fault.address;
-       kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
+       vcpu->arch.cr2 = fault->address;
+       kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
  }
  
- void kvm_propagate_fault(struct kvm_vcpu *vcpu)
+ void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
  {
-       if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
-               vcpu->arch.nested_mmu.inject_page_fault(vcpu);
+       if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
+               vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
        else
-               vcpu->arch.mmu.inject_page_fault(vcpu);
-       vcpu->arch.fault.nested = false;
+               vcpu->arch.mmu.inject_page_fault(vcpu, fault);
  }
  
  void kvm_inject_nmi(struct kvm_vcpu *vcpu)
@@@ -460,8 -473,8 +473,8 @@@ static bool pdptrs_changed(struct kvm_v
                      (unsigned long *)&vcpu->arch.regs_avail))
                return true;
  
-       gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT;
-       offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1);
+       gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
+       offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
        r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
                                       PFERR_USER_MASK | PFERR_WRITE_MASK);
        if (r < 0)
@@@ -506,12 -519,15 +519,15 @@@ int kvm_set_cr0(struct kvm_vcpu *vcpu, 
                } else
  #endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
-                                                vcpu->arch.cr3))
+                                                kvm_read_cr3(vcpu)))
                        return 1;
        }
  
        kvm_x86_ops->set_cr0(vcpu, cr0);
  
+       if ((cr0 ^ old_cr0) & X86_CR0_PG)
+               kvm_clear_async_pf_completion_queue(vcpu);
        if ((cr0 ^ old_cr0) & update_bits)
                kvm_mmu_reset_context(vcpu);
        return 0;
@@@ -595,7 -611,8 +611,8 @@@ int kvm_set_cr4(struct kvm_vcpu *vcpu, 
                        return 1;
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
-                  && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
+                  && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
+                                  kvm_read_cr3(vcpu)))
                return 1;
  
        if (cr4 & X86_CR4_VMXE)
@@@ -615,7 -632,7 +632,7 @@@ EXPORT_SYMBOL_GPL(kvm_set_cr4)
  
  int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
  {
-       if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
+       if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
                kvm_mmu_sync_roots(vcpu);
                kvm_mmu_flush_tlb(vcpu);
                return 0;
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
                return 1;
        vcpu->arch.cr3 = cr3;
+       __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
        vcpu->arch.mmu.new_cr3(vcpu);
        return 0;
  }
  EXPORT_SYMBOL_GPL(kvm_set_cr3);
  
- int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+ int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
  {
        if (cr8 & CR8_RESERVED_BITS)
                return 1;
                vcpu->arch.cr8 = cr8;
        return 0;
  }
- void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
- {
-       if (__kvm_set_cr8(vcpu, cr8))
-               kvm_inject_gp(vcpu, 0);
- }
  EXPORT_SYMBOL_GPL(kvm_set_cr8);
  
  unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
@@@ -775,12 -787,12 +787,12 @@@ EXPORT_SYMBOL_GPL(kvm_get_dr)
   * kvm-specific. Those are put in the beginning of the list.
   */
  
- #define KVM_SAVE_MSRS_BEGIN   7
+ #define KVM_SAVE_MSRS_BEGIN   8
  static u32 msrs_to_save[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
        HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
-       HV_X64_MSR_APIC_ASSIST_PAGE,
+       HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN,
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_STAR,
  #ifdef CONFIG_X86_64
@@@ -830,7 -842,6 +842,6 @@@ static int set_efer(struct kvm_vcpu *vc
        kvm_x86_ops->set_efer(vcpu, efer);
  
        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
-       kvm_mmu_reset_context(vcpu);
  
        /* Update reserved bits */
        if ((efer ^ old_efer) & EFER_NX)
@@@ -976,7 -987,7 +987,7 @@@ static inline u64 nsec_to_cycles(u64 ns
        if (kvm_tsc_changes_freq())
                printk_once(KERN_WARNING
                 "kvm: unreliable cycle conversion on adjustable rate TSC\n");
 -      ret = nsec * __get_cpu_var(cpu_tsc_khz);
 +      ret = nsec * __this_cpu_read(cpu_tsc_khz);
        do_div(ret, USEC_PER_SEC);
        return ret;
  }
@@@ -1061,7 -1072,7 +1072,7 @@@ static int kvm_guest_time_update(struc
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
        kernel_ns = get_kernel_ns();
 -      this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
 +      this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
  
        if (unlikely(this_tsc_khz == 0)) {
                local_irq_restore(flags);
@@@ -1418,6 -1429,30 +1429,30 @@@ static int set_msr_hyperv(struct kvm_vc
        return 0;
  }
  
+ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
+ {
+       gpa_t gpa = data & ~0x3f;
+       /* Bits 2:5 are reserved, should be zero */
+       if (data & 0x3c)
+               return 1;
+       vcpu->arch.apf.msr_val = data;
+       if (!(data & KVM_ASYNC_PF_ENABLED)) {
+               kvm_clear_async_pf_completion_queue(vcpu);
+               kvm_async_pf_hash_reset(vcpu);
+               return 0;
+       }
+       if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+               return 1;
+       vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
+       kvm_async_pf_wakeup_all(vcpu);
+       return 0;
+ }
  int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  {
        switch (msr) {
                }
                break;
        }
+       case MSR_KVM_ASYNC_PF_EN:
+               if (kvm_pv_enable_async_pf(vcpu, data))
+                       return 1;
+               break;
        case MSR_IA32_MCG_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
@@@ -1775,6 -1814,9 +1814,9 @@@ int kvm_get_msr_common(struct kvm_vcpu 
        case MSR_KVM_SYSTEM_TIME_NEW:
                data = vcpu->arch.time;
                break;
+       case MSR_KVM_ASYNC_PF_EN:
+               data = vcpu->arch.apf.msr_val;
+               break;
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
        case MSR_IA32_MCG_CAP:
@@@ -1904,6 -1946,7 +1946,7 @@@ int kvm_dev_ioctl_check_extension(long 
        case KVM_CAP_NOP_IO_DELAY:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_SYNC_MMU:
+       case KVM_CAP_USER_NMI:
        case KVM_CAP_REINJECT_CONTROL:
        case KVM_CAP_IRQ_INJECT_STATUS:
        case KVM_CAP_ASSIGN_DEV_IRQ:
        case KVM_CAP_DEBUGREGS:
        case KVM_CAP_X86_ROBUST_SINGLESTEP:
        case KVM_CAP_XSAVE:
+       case KVM_CAP_ASYNC_PF:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
        return r;
  }
  
+ static void cpuid_mask(u32 *word, int wordnum)
+ {
+       *word &= boot_cpu_data.x86_capability[wordnum];
+ }
  static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                           u32 index)
  {
@@@ -2259,7 -2308,9 +2308,9 @@@ static void do_cpuid_ent(struct kvm_cpu
                break;
        case 1:
                entry->edx &= kvm_supported_word0_x86_features;
+               cpuid_mask(&entry->edx, 0);
                entry->ecx &= kvm_supported_word4_x86_features;
+               cpuid_mask(&entry->ecx, 4);
                /* we support x2apic emulation even if host does not support
                 * it since we emulate x2apic in software */
                entry->ecx |= F(X2APIC);
                break;
        case 0x80000001:
                entry->edx &= kvm_supported_word1_x86_features;
+               cpuid_mask(&entry->edx, 1);
                entry->ecx &= kvm_supported_word6_x86_features;
+               cpuid_mask(&entry->ecx, 6);
                break;
        }
  
@@@ -3169,20 -3222,18 +3222,18 @@@ int kvm_vm_ioctl_get_dirty_log(struct k
                struct kvm_memslots *slots, *old_slots;
                unsigned long *dirty_bitmap;
  
-               r = -ENOMEM;
-               dirty_bitmap = vmalloc(n);
-               if (!dirty_bitmap)
-                       goto out;
+               dirty_bitmap = memslot->dirty_bitmap_head;
+               if (memslot->dirty_bitmap == dirty_bitmap)
+                       dirty_bitmap += n / sizeof(long);
                memset(dirty_bitmap, 0, n);
  
                r = -ENOMEM;
                slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
-               if (!slots) {
-                       vfree(dirty_bitmap);
+               if (!slots)
                        goto out;
-               }
                memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
                slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
+               slots->generation++;
  
                old_slots = kvm->memslots;
                rcu_assign_pointer(kvm->memslots, slots);
                spin_unlock(&kvm->mmu_lock);
  
                r = -EFAULT;
-               if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
-                       vfree(dirty_bitmap);
+               if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
                        goto out;
-               }
-               vfree(dirty_bitmap);
        } else {
                r = -EFAULT;
                if (clear_user(log->dirty_bitmap, n))
@@@ -3266,8 -3314,10 +3314,10 @@@ long kvm_arch_vm_ioctl(struct file *fil
                if (vpic) {
                        r = kvm_ioapic_init(kvm);
                        if (r) {
+                               mutex_lock(&kvm->slots_lock);
                                kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
                                                          &vpic->dev);
+                               mutex_unlock(&kvm->slots_lock);
                                kfree(vpic);
                                goto create_irqchip_unlock;
                        }
                smp_wmb();
                r = kvm_setup_default_irq_routing(kvm);
                if (r) {
+                       mutex_lock(&kvm->slots_lock);
                        mutex_lock(&kvm->irq_lock);
                        kvm_ioapic_destroy(kvm);
                        kvm_destroy_pic(kvm);
                        mutex_unlock(&kvm->irq_lock);
+                       mutex_unlock(&kvm->slots_lock);
                }
        create_irqchip_unlock:
                mutex_unlock(&kvm->lock);
@@@ -3557,63 -3609,63 +3609,63 @@@ static gpa_t translate_gpa(struct kvm_v
  static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
  {
        gpa_t t_gpa;
-       u32 error;
+       struct x86_exception exception;
  
        BUG_ON(!mmu_is_nested(vcpu));
  
        /* NPT walks are always user-walks */
        access |= PFERR_USER_MASK;
-       t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
-       if (t_gpa == UNMAPPED_GVA)
-               vcpu->arch.fault.nested = true;
+       t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
  
        return t_gpa;
  }
  
- gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+                             struct x86_exception *exception)
  {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  }
  
-  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+                               struct x86_exception *exception)
  {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
        access |= PFERR_FETCH_MASK;
-       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  }
  
- gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+                              struct x86_exception *exception)
  {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
        access |= PFERR_WRITE_MASK;
-       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  }
  
  /* uses this to access any guest's mapped memory without checking CPL */
- gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+                               struct x86_exception *exception)
  {
-       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
+       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
  }
  
  static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
                                      struct kvm_vcpu *vcpu, u32 access,
-                                     u32 *error)
+                                     struct x86_exception *exception)
  {
        void *data = val;
        int r = X86EMUL_CONTINUE;
  
        while (bytes) {
                gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
-                                                           error);
+                                                           exception);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
  
-               if (gpa == UNMAPPED_GVA) {
-                       r = X86EMUL_PROPAGATE_FAULT;
-                       goto out;
-               }
+               if (gpa == UNMAPPED_GVA)
+                       return X86EMUL_PROPAGATE_FAULT;
                ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
                if (ret < 0) {
                        r = X86EMUL_IO_NEEDED;
  
  /* used for instruction fetching */
  static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                               struct kvm_vcpu *vcpu, u32 *error)
+                               struct kvm_vcpu *vcpu,
+                               struct x86_exception *exception)
  {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
        return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
-                                         access | PFERR_FETCH_MASK, error);
+                                         access | PFERR_FETCH_MASK,
+                                         exception);
  }
  
  static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                              struct kvm_vcpu *vcpu, u32 *error)
+                              struct kvm_vcpu *vcpu,
+                              struct x86_exception *exception)
  {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
        return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
-                                         error);
+                                         exception);
  }
  
  static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
-                              struct kvm_vcpu *vcpu, u32 *error)
+                                     struct kvm_vcpu *vcpu,
+                                     struct x86_exception *exception)
  {
-       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
+       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
  }
  
  static int kvm_write_guest_virt_system(gva_t addr, void *val,
                                       unsigned int bytes,
                                       struct kvm_vcpu *vcpu,
-                                      u32 *error)
+                                      struct x86_exception *exception)
  {
        void *data = val;
        int r = X86EMUL_CONTINUE;
        while (bytes) {
                gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
                                                             PFERR_WRITE_MASK,
-                                                            error);
+                                                            exception);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
  
-               if (gpa == UNMAPPED_GVA) {
-                       r = X86EMUL_PROPAGATE_FAULT;
-                       goto out;
-               }
+               if (gpa == UNMAPPED_GVA)
+                       return X86EMUL_PROPAGATE_FAULT;
                ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
                if (ret < 0) {
                        r = X86EMUL_IO_NEEDED;
@@@ -3688,7 -3742,7 +3742,7 @@@ out
  static int emulator_read_emulated(unsigned long addr,
                                  void *val,
                                  unsigned int bytes,
-                                 unsigned int *error_code,
+                                 struct x86_exception *exception,
                                  struct kvm_vcpu *vcpu)
  {
        gpa_t                 gpa;
                return X86EMUL_CONTINUE;
        }
  
-       gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code);
+       gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception);
  
        if (gpa == UNMAPPED_GVA)
                return X86EMUL_PROPAGATE_FAULT;
        if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
                goto mmio;
  
-       if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
-                               == X86EMUL_CONTINUE)
+       if (kvm_read_guest_virt(addr, val, bytes, vcpu, exception)
+           == X86EMUL_CONTINUE)
                return X86EMUL_CONTINUE;
  
  mmio:
  }
  
  int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
-                         const void *val, int bytes)
+                       const void *val, int bytes)
  {
        int ret;
  
  static int emulator_write_emulated_onepage(unsigned long addr,
                                           const void *val,
                                           unsigned int bytes,
-                                          unsigned int *error_code,
+                                          struct x86_exception *exception,
                                           struct kvm_vcpu *vcpu)
  {
        gpa_t                 gpa;
  
-       gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code);
+       gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception);
  
        if (gpa == UNMAPPED_GVA)
                return X86EMUL_PROPAGATE_FAULT;
@@@ -3787,7 -3841,7 +3841,7 @@@ mmio
  int emulator_write_emulated(unsigned long addr,
                            const void *val,
                            unsigned int bytes,
-                           unsigned int *error_code,
+                           struct x86_exception *exception,
                            struct kvm_vcpu *vcpu)
  {
        /* Crossing a page boundary? */
                int rc, now;
  
                now = -addr & ~PAGE_MASK;
-               rc = emulator_write_emulated_onepage(addr, val, now, error_code,
+               rc = emulator_write_emulated_onepage(addr, val, now, exception,
                                                     vcpu);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
                val += now;
                bytes -= now;
        }
-       return emulator_write_emulated_onepage(addr, val, bytes, error_code,
+       return emulator_write_emulated_onepage(addr, val, bytes, exception,
                                               vcpu);
  }
  
@@@ -3821,7 -3875,7 +3875,7 @@@ static int emulator_cmpxchg_emulated(un
                                     const void *old,
                                     const void *new,
                                     unsigned int bytes,
-                                    unsigned int *error_code,
+                                    struct x86_exception *exception,
                                     struct kvm_vcpu *vcpu)
  {
        gpa_t gpa;
  emul_write:
        printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
  
-       return emulator_write_emulated(addr, new, bytes, error_code, vcpu);
+       return emulator_write_emulated(addr, new, bytes, exception, vcpu);
  }
  
  static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
@@@ -3904,7 -3958,7 +3958,7 @@@ static int emulator_pio_in_emulated(in
        if (vcpu->arch.pio.count)
                goto data_avail;
  
-       trace_kvm_pio(0, port, size, 1);
+       trace_kvm_pio(0, port, size, count);
  
        vcpu->arch.pio.port = port;
        vcpu->arch.pio.in = 1;
@@@ -3932,7 -3986,7 +3986,7 @@@ static int emulator_pio_out_emulated(in
                              const void *val, unsigned int count,
                              struct kvm_vcpu *vcpu)
  {
-       trace_kvm_pio(1, port, size, 1);
+       trace_kvm_pio(1, port, size, count);
  
        vcpu->arch.pio.port = port;
        vcpu->arch.pio.in = 0;
@@@ -3973,13 -4027,15 +4027,15 @@@ int kvm_emulate_wbinvd(struct kvm_vcpu 
                return X86EMUL_CONTINUE;
  
        if (kvm_x86_ops->has_wbinvd_exit()) {
-               preempt_disable();
+               int cpu = get_cpu();
+               cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
                smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
                                wbinvd_ipi, NULL, 1);
-               preempt_enable();
+               put_cpu();
                cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
-       }
-       wbinvd();
+       } else
+               wbinvd();
        return X86EMUL_CONTINUE;
  }
  EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
@@@ -4019,7 -4075,7 +4075,7 @@@ static unsigned long emulator_get_cr(in
                value = vcpu->arch.cr2;
                break;
        case 3:
-               value = vcpu->arch.cr3;
+               value = kvm_read_cr3(vcpu);
                break;
        case 4:
                value = kvm_read_cr4(vcpu);
@@@ -4053,7 -4109,7 +4109,7 @@@ static int emulator_set_cr(int cr, unsi
                res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
                break;
        case 8:
-               res = __kvm_set_cr8(vcpu, val & 0xfUL);
+               res = kvm_set_cr8(vcpu, val);
                break;
        default:
                vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
@@@ -4206,12 -4262,13 +4262,13 @@@ static void toggle_interruptibility(str
  static void inject_emulated_exception(struct kvm_vcpu *vcpu)
  {
        struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
-       if (ctxt->exception == PF_VECTOR)
-               kvm_propagate_fault(vcpu);
-       else if (ctxt->error_code_valid)
-               kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
+       if (ctxt->exception.vector == PF_VECTOR)
+               kvm_propagate_fault(vcpu, &ctxt->exception);
+       else if (ctxt->exception.error_code_valid)
+               kvm_queue_exception_e(vcpu, ctxt->exception.vector,
+                                     ctxt->exception.error_code);
        else
-               kvm_queue_exception(vcpu, ctxt->exception);
+               kvm_queue_exception(vcpu, ctxt->exception.vector);
  }
  
  static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
@@@ -4267,13 -4324,19 +4324,19 @@@ EXPORT_SYMBOL_GPL(kvm_inject_realmode_i
  
  static int handle_emulation_failure(struct kvm_vcpu *vcpu)
  {
+       int r = EMULATE_DONE;
        ++vcpu->stat.insn_emulation_fail;
        trace_kvm_emulate_insn_failed(vcpu);
-       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-       vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
-       vcpu->run->internal.ndata = 0;
+       if (!is_guest_mode(vcpu)) {
+               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+               vcpu->run->internal.ndata = 0;
+               r = EMULATE_FAIL;
+       }
        kvm_queue_exception(vcpu, UD_VECTOR);
-       return EMULATE_FAIL;
+       return r;
  }
  
  static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
        return false;
  }
  
- int emulate_instruction(struct kvm_vcpu *vcpu,
-                       unsigned long cr2,
-                       u16 error_code,
-                       int emulation_type)
+ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
+                           unsigned long cr2,
+                           int emulation_type,
+                           void *insn,
+                           int insn_len)
  {
        int r;
        struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
        if (!(emulation_type & EMULTYPE_NO_DECODE)) {
                init_emulate_ctxt(vcpu);
                vcpu->arch.emulate_ctxt.interruptibility = 0;
-               vcpu->arch.emulate_ctxt.exception = -1;
+               vcpu->arch.emulate_ctxt.have_exception = false;
                vcpu->arch.emulate_ctxt.perm_ok = false;
  
-               r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
+               r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
                if (r == X86EMUL_PROPAGATE_FAULT)
                        goto done;
  
@@@ -4389,7 -4453,7 +4453,7 @@@ restart
        }
  
  done:
-       if (vcpu->arch.emulate_ctxt.exception >= 0) {
+       if (vcpu->arch.emulate_ctxt.have_exception) {
                inject_emulated_exception(vcpu);
                r = EMULATE_DONE;
        } else if (vcpu->arch.pio.count) {
  
        return r;
  }
- EXPORT_SYMBOL_GPL(emulate_instruction);
+ EXPORT_SYMBOL_GPL(x86_emulate_instruction);
  
  int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
  {
@@@ -4427,7 -4491,7 +4491,7 @@@ EXPORT_SYMBOL_GPL(kvm_fast_pio_out)
  
  static void tsc_bad(void *info)
  {
 -      __get_cpu_var(cpu_tsc_khz) = 0;
 +      __this_cpu_write(cpu_tsc_khz, 0);
  }
  
  static void tsc_khz_changed(void *data)
                khz = cpufreq_quick_get(raw_smp_processor_id());
        if (!khz)
                khz = tsc_khz;
 -      __get_cpu_var(cpu_tsc_khz) = khz;
 +      __this_cpu_write(cpu_tsc_khz, khz);
  }
  
  static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@@ -4653,7 -4717,6 +4717,6 @@@ int kvm_arch_init(void *opaque
  
        kvm_x86_ops = ops;
        kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
-       kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
        kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
                        PT_DIRTY_MASK, PT64_NX_MASK, 0);
  
@@@ -5116,6 -5179,12 +5179,12 @@@ static int vcpu_enter_guest(struct kvm_
                        vcpu->fpu_active = 0;
                        kvm_x86_ops->fpu_deactivate(vcpu);
                }
+               if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
+                       /* Page is swapped out. Do synthetic halt */
+                       vcpu->arch.apf.halted = true;
+                       r = 1;
+                       goto out;
+               }
        }
  
        r = kvm_mmu_reload(vcpu);
@@@ -5244,7 -5313,8 +5313,8 @@@ static int __vcpu_run(struct kvm_vcpu *
  
        r = 1;
        while (r > 0) {
-               if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
+               if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+                   !vcpu->arch.apf.halted)
                        r = vcpu_enter_guest(vcpu);
                else {
                        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
                                        vcpu->arch.mp_state =
                                                KVM_MP_STATE_RUNNABLE;
                                case KVM_MP_STATE_RUNNABLE:
+                                       vcpu->arch.apf.halted = false;
                                        break;
                                case KVM_MP_STATE_SIPI_RECEIVED:
                                default:
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
                        ++vcpu->stat.request_irq_exits;
                }
+               kvm_check_async_pf_completion(vcpu);
                if (signal_pending(current)) {
                        r = -EINTR;
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
@@@ -5302,6 -5376,9 +5376,9 @@@ int kvm_arch_vcpu_ioctl_run(struct kvm_
        int r;
        sigset_t sigsaved;
  
+       if (!tsk_used_math(current) && init_fpu(current))
+               return -ENOMEM;
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
  
        }
  
        /* re-sync apic's tpr */
-       if (!irqchip_in_kernel(vcpu->kvm))
-               kvm_set_cr8(vcpu, kvm_run->cr8);
+       if (!irqchip_in_kernel(vcpu->kvm)) {
+               if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
+                       r = -EINVAL;
+                       goto out;
+               }
+       }
  
        if (vcpu->arch.pio.count || vcpu->mmio_needed) {
                if (vcpu->mmio_needed) {
                        vcpu->mmio_needed = 0;
                }
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-               r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
+               r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                if (r != EMULATE_DONE) {
                        r = 0;
@@@ -5436,7 -5517,7 +5517,7 @@@ int kvm_arch_vcpu_ioctl_get_sregs(struc
  
        sregs->cr0 = kvm_read_cr0(vcpu);
        sregs->cr2 = vcpu->arch.cr2;
-       sregs->cr3 = vcpu->arch.cr3;
+       sregs->cr3 = kvm_read_cr3(vcpu);
        sregs->cr4 = kvm_read_cr4(vcpu);
        sregs->cr8 = kvm_get_cr8(vcpu);
        sregs->efer = vcpu->arch.efer;
@@@ -5504,8 -5585,9 +5585,9 @@@ int kvm_arch_vcpu_ioctl_set_sregs(struc
        kvm_x86_ops->set_gdt(vcpu, &dt);
  
        vcpu->arch.cr2 = sregs->cr2;
-       mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
+       mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
        vcpu->arch.cr3 = sregs->cr3;
+       __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
  
        kvm_set_cr8(vcpu, sregs->cr8);
  
        if (sregs->cr4 & X86_CR4_OSXSAVE)
                update_cpuid(vcpu);
        if (!is_long_mode(vcpu) && is_pae(vcpu)) {
-               load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
+               load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
                mmu_reset_needed = 1;
        }
  
@@@ -5773,6 -5855,8 +5855,8 @@@ free_vcpu
  
  void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
  {
+       vcpu->arch.apf.msr_val = 0;
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
@@@ -5792,6 -5876,11 +5876,11 @@@ int kvm_arch_vcpu_reset(struct kvm_vcp
        vcpu->arch.dr7 = DR7_FIXED_1;
  
        kvm_make_request(KVM_REQ_EVENT, vcpu);
+       vcpu->arch.apf.msr_val = 0;
+       kvm_clear_async_pf_completion_queue(vcpu);
+       kvm_async_pf_hash_reset(vcpu);
+       vcpu->arch.apf.halted = false;
  
        return kvm_x86_ops->vcpu_reset(vcpu);
  }
@@@ -5881,6 -5970,8 +5970,8 @@@ int kvm_arch_vcpu_init(struct kvm_vcpu 
        if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
                goto fail_free_mce_banks;
  
+       kvm_async_pf_hash_reset(vcpu);
        return 0;
  fail_free_mce_banks:
        kfree(vcpu->arch.mce_banks);
@@@ -5906,13 -5997,8 +5997,8 @@@ void kvm_arch_vcpu_uninit(struct kvm_vc
        free_page((unsigned long)vcpu->arch.pio_data);
  }
  
struct  kvm *kvm_arch_create_vm(void)
int kvm_arch_init_vm(struct kvm *kvm)
  {
-       struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-       if (!kvm)
-               return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
  
  
        spin_lock_init(&kvm->arch.tsc_write_lock);
  
-       return kvm;
+       return 0;
  }
  
  static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
@@@ -5939,8 -6025,10 +6025,10 @@@ static void kvm_free_vcpus(struct kvm *
        /*
         * Unpin any mmu pages first.
         */
-       kvm_for_each_vcpu(i, vcpu, kvm)
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               kvm_clear_async_pf_completion_queue(vcpu);
                kvm_unload_vcpu_mmu(vcpu);
+       }
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);
  
@@@ -5964,13 -6052,10 +6052,10 @@@ void kvm_arch_destroy_vm(struct kvm *kv
        kfree(kvm->arch.vpic);
        kfree(kvm->arch.vioapic);
        kvm_free_vcpus(kvm);
-       kvm_free_physmem(kvm);
        if (kvm->arch.apic_access_page)
                put_page(kvm->arch.apic_access_page);
        if (kvm->arch.ept_identity_pagetable)
                put_page(kvm->arch.ept_identity_pagetable);
-       cleanup_srcu_struct(&kvm->srcu);
-       kfree(kvm);
  }
  
  int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@@ -6051,7 -6136,9 +6136,9 @@@ void kvm_arch_flush_shadow(struct kvm *
  
  int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
  {
-       return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
+       return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+               !vcpu->arch.apf.halted)
+               || !list_empty_careful(&vcpu->async_pf.done)
                || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
                || vcpu->arch.nmi_pending ||
                (kvm_arch_interrupt_allowed(vcpu) &&
@@@ -6110,6 -6197,147 +6197,147 @@@ void kvm_set_rflags(struct kvm_vcpu *vc
  }
  EXPORT_SYMBOL_GPL(kvm_set_rflags);
  
+ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
+ {
+       int r;
+       if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
+             is_error_page(work->page))
+               return;
+       r = kvm_mmu_reload(vcpu);
+       if (unlikely(r))
+               return;
+       if (!vcpu->arch.mmu.direct_map &&
+             work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
+               return;
+       vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
+ }
+ static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
+ {
+       return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
+ }
+ static inline u32 kvm_async_pf_next_probe(u32 key)
+ {
+       return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
+ }
+ static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+ {
+       u32 key = kvm_async_pf_hash_fn(gfn);
+       while (vcpu->arch.apf.gfns[key] != ~0)
+               key = kvm_async_pf_next_probe(key);
+       vcpu->arch.apf.gfns[key] = gfn;
+ }
+ static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
+ {
+       int i;
+       u32 key = kvm_async_pf_hash_fn(gfn);
+       for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
+                    (vcpu->arch.apf.gfns[key] != gfn &&
+                     vcpu->arch.apf.gfns[key] != ~0); i++)
+               key = kvm_async_pf_next_probe(key);
+       return key;
+ }
+ bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+ {
+       return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
+ }
+ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+ {
+       u32 i, j, k;
+       i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
+       while (true) {
+               vcpu->arch.apf.gfns[i] = ~0;
+               do {
+                       j = kvm_async_pf_next_probe(j);
+                       if (vcpu->arch.apf.gfns[j] == ~0)
+                               return;
+                       k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
+                       /*
+                        * k lies cyclically in ]i,j]
+                        * |    i.k.j |
+                        * |....j i.k.| or  |.k..j i...|
+                        */
+               } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
+               vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
+               i = j;
+       }
+ }
+ static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
+ {
+       return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
+                                     sizeof(val));
+ }
+ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+                                    struct kvm_async_pf *work)
+ {
+       struct x86_exception fault;
+       trace_kvm_async_pf_not_present(work->arch.token, work->gva);
+       kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
+       if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
+           (vcpu->arch.apf.send_user_only &&
+            kvm_x86_ops->get_cpl(vcpu) == 0))
+               kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+       else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
+               fault.vector = PF_VECTOR;
+               fault.error_code_valid = true;
+               fault.error_code = 0;
+               fault.nested_page_fault = false;
+               fault.address = work->arch.token;
+               kvm_inject_page_fault(vcpu, &fault);
+       }
+ }
+ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+                                struct kvm_async_pf *work)
+ {
+       struct x86_exception fault;
+       trace_kvm_async_pf_ready(work->arch.token, work->gva);
+       if (is_error_page(work->page))
+               work->arch.token = ~0; /* broadcast wakeup */
+       else
+               kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+       if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
+           !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+               fault.vector = PF_VECTOR;
+               fault.error_code_valid = true;
+               fault.error_code = 0;
+               fault.nested_page_fault = false;
+               fault.address = work->arch.token;
+               kvm_inject_page_fault(vcpu, &fault);
+       }
+       vcpu->arch.apf.halted = false;
+ }
+ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
+ {
+       if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
+               return true;
+       else
+               return !kvm_event_needs_reinjection(vcpu) &&
+                       kvm_x86_ops->interrupt_allowed(vcpu);
+ }
  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);