Merge branch 'kvm-updates/2.6.39' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 18 Mar 2011 01:40:35 +0000 (18:40 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 18 Mar 2011 01:40:35 +0000 (18:40 -0700)
* 'kvm-updates/2.6.39' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (55 commits)
  KVM: unbreak userspace that does not set tss address
  KVM: MMU: cleanup pte write path
  KVM: MMU: introduce a common function to get no-dirty-logged slot
  KVM: fix rcu usage in init_rmode_* functions
  KVM: fix kvmclock regression due to missing clock update
  KVM: emulator: Fix permission checking in io permission bitmap
  KVM: emulator: Fix io permission checking for 64bit guest
  KVM: SVM: Load %gs earlier if CONFIG_X86_32_LAZY_GS=n
  KVM: x86: Remove useless regs_page pointer from kvm_lapic
  KVM: improve comment on rcu use in irqfd_deassign
  KVM: MMU: remove unused macros
  KVM: MMU: cleanup page alloc and free
  KVM: MMU: do not record gfn in kvm_mmu_pte_write
  KVM: MMU: move mmu pages calculated out of mmu lock
  KVM: MMU: set spte accessed bit properly
  KVM: MMU: fix kvm_mmu_slot_remove_write_access dropping intermediate W bits
  KVM: Start lock documentation
  KVM: better readability of efer_reserved_bits
  KVM: Clear async page fault hash after switching to real mode
  KVM: VMX: Initialize vm86 TSS only once.
  ...

33 files changed:
Documentation/kvm/locking.txt [new file with mode: 0644]
arch/alpha/include/asm/errno.h
arch/ia64/kvm/kvm-ia64.c
arch/mips/include/asm/errno.h
arch/parisc/include/asm/errno.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/booke.c
arch/sparc/include/asm/errno.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr-index.h
arch/x86/kernel/kvm.c
arch/x86/kvm/emulate.c
arch/x86/kvm/i8259.c
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
drivers/infiniband/hw/ipath/ipath_user_pages.c
drivers/infiniband/hw/qib/qib_user_pages.c
include/asm-generic/errno.h
include/linux/kvm_host.h
include/linux/mm.h
kernel/fork.c
kernel/pid.c
mm/internal.h
mm/memory-failure.c
mm/memory.c
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

diff --git a/Documentation/kvm/locking.txt b/Documentation/kvm/locking.txt
new file mode 100644 (file)
index 0000000..3b4cd3b
--- /dev/null
@@ -0,0 +1,25 @@
+KVM Lock Overview
+=================
+
+1. Acquisition Orders
+---------------------
+
+(to be written)
+
+2. Reference
+------------
+
+Name:          kvm_lock
+Type:          raw_spinlock
+Arch:          any
+Protects:      - vm_list
+               - hardware virtualization enable/disable
+Comment:       'raw' because hardware enabling/disabling must be atomic wrt
+               migration.
+
+Name:          kvm_arch::tsc_write_lock
+Type:          raw_spinlock
+Arch:          x86
+Protects:      - kvm_arch::{last_tsc_write,last_tsc_nsec,last_tsc_offset}
+               - tsc offset in vmcb
+Comment:       'raw' because updating the tsc offsets must not be preempted.
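
A minimal sketch of the raw-spinlock pattern the new document describes
(example_lock and example_hw_update are illustrative names, not from this
series): the raw_ variant never turns into a sleeping lock, even on
preempt-rt, so the critical section can neither be preempted nor migrate
between CPUs.

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

static void example_hw_update(void)
{
	unsigned long flags;

	/* irqs off + raw lock: no preemption, no migration, no sleeping */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... hardware enable/disable or tsc-offset style update ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}
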
diff --git a/arch/alpha/include/asm/errno.h b/arch/alpha/include/asm/errno.h
index 98099bd..e5f29ca 100644 (file)
 
 #define        ERFKILL         138     /* Operation not possible due to RF-kill */
 
+#define EHWPOISON      139     /* Memory page has hardware error */
+
 #endif
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 70d224d..8213efe 100644 (file)
@@ -662,6 +662,7 @@ again:
                goto vcpu_run_fail;
 
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
+       vcpu->mode = IN_GUEST_MODE;
        kvm_guest_enter();
 
        /*
@@ -683,6 +684,7 @@ again:
         */
        barrier();
        kvm_guest_exit();
+       vcpu->mode = OUTSIDE_GUEST_MODE;
        preempt_enable();
 
        idx = srcu_read_lock(&vcpu->kvm->srcu);
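
For context, a hedged sketch of why the vcpu->mode bookkeeping added above
is useful (the kicker helper here is illustrative, not from this patch): a
sender that needs a remote vcpu to leave guest context can skip the IPI
entirely when the target is not in guest mode.

/* illustrative only: decide whether a kick IPI is needed at all */
static void example_kick_if_in_guest(struct kvm_vcpu *vcpu)
{
	if (vcpu->mode == IN_GUEST_MODE)
		smp_send_reschedule(vcpu->cpu);	/* forces a VM exit */
}
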
diff --git a/arch/mips/include/asm/errno.h b/arch/mips/include/asm/errno.h
index a0efc73..6dcd358 100644 (file)
 
 #define        ERFKILL         167     /* Operation not possible due to RF-kill */
 
+#define EHWPOISON      168     /* Memory page has hardware error */
+
 #define EDQUOT         1133    /* Quota exceeded */
 
 #ifdef __KERNEL__
diff --git a/arch/parisc/include/asm/errno.h b/arch/parisc/include/asm/errno.h
index 9992abd..135ad60 100644 (file)
 
 #define        ERFKILL         256     /* Operation not possible due to RF-kill */
 
+#define EHWPOISON      257     /* Memory page has hardware error */
+
 #endif
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index badc983..c961de4 100644 (file)
@@ -1141,9 +1141,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        regs->sprg1 = vcpu->arch.shared->sprg1;
        regs->sprg2 = vcpu->arch.shared->sprg2;
        regs->sprg3 = vcpu->arch.shared->sprg3;
-       regs->sprg5 = vcpu->arch.sprg4;
-       regs->sprg6 = vcpu->arch.sprg5;
-       regs->sprg7 = vcpu->arch.sprg6;
+       regs->sprg4 = vcpu->arch.sprg4;
+       regs->sprg5 = vcpu->arch.sprg5;
+       regs->sprg6 = vcpu->arch.sprg6;
+       regs->sprg7 = vcpu->arch.sprg7;
 
        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -1167,9 +1168,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        vcpu->arch.shared->sprg1 = regs->sprg1;
        vcpu->arch.shared->sprg2 = regs->sprg2;
        vcpu->arch.shared->sprg3 = regs->sprg3;
-       vcpu->arch.sprg5 = regs->sprg4;
-       vcpu->arch.sprg6 = regs->sprg5;
-       vcpu->arch.sprg7 = regs->sprg6;
+       vcpu->arch.sprg4 = regs->sprg4;
+       vcpu->arch.sprg5 = regs->sprg5;
+       vcpu->arch.sprg6 = regs->sprg6;
+       vcpu->arch.sprg7 = regs->sprg7;
 
        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 77575d0..ef76acb 100644 (file)
@@ -546,9 +546,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        regs->sprg1 = vcpu->arch.shared->sprg1;
        regs->sprg2 = vcpu->arch.shared->sprg2;
        regs->sprg3 = vcpu->arch.shared->sprg3;
-       regs->sprg5 = vcpu->arch.sprg4;
-       regs->sprg6 = vcpu->arch.sprg5;
-       regs->sprg7 = vcpu->arch.sprg6;
+       regs->sprg4 = vcpu->arch.sprg4;
+       regs->sprg5 = vcpu->arch.sprg5;
+       regs->sprg6 = vcpu->arch.sprg6;
+       regs->sprg7 = vcpu->arch.sprg7;
 
        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -572,9 +573,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        vcpu->arch.shared->sprg1 = regs->sprg1;
        vcpu->arch.shared->sprg2 = regs->sprg2;
        vcpu->arch.shared->sprg3 = regs->sprg3;
-       vcpu->arch.sprg5 = regs->sprg4;
-       vcpu->arch.sprg6 = regs->sprg5;
-       vcpu->arch.sprg7 = regs->sprg6;
+       vcpu->arch.sprg4 = regs->sprg4;
+       vcpu->arch.sprg5 = regs->sprg5;
+       vcpu->arch.sprg6 = regs->sprg6;
+       vcpu->arch.sprg7 = regs->sprg7;
 
        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
diff --git a/arch/sparc/include/asm/errno.h b/arch/sparc/include/asm/errno.h
index 4e2bc49..c351aba 100644 (file)
 
 #define        ERFKILL         134     /* Operation not possible due to RF-kill */
 
+#define EHWPOISON      135     /* Memory page has hardware error */
+
 #endif
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 8e37deb..0f52135 100644 (file)
@@ -142,9 +142,9 @@ struct x86_emulate_ops {
        int (*pio_out_emulated)(int size, unsigned short port, const void *val,
                                unsigned int count, struct kvm_vcpu *vcpu);
 
-       bool (*get_cached_descriptor)(struct desc_struct *desc,
+       bool (*get_cached_descriptor)(struct desc_struct *desc, u32 *base3,
                                      int seg, struct kvm_vcpu *vcpu);
-       void (*set_cached_descriptor)(struct desc_struct *desc,
+       void (*set_cached_descriptor)(struct desc_struct *desc, u32 base3,
                                      int seg, struct kvm_vcpu *vcpu);
        u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
        void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
@@ -239,6 +239,7 @@ struct x86_emulate_ctxt {
        int interruptibility;
 
        bool perm_ok; /* do not check permissions if true */
+       bool only_vendor_specific_insn;
 
        bool have_exception;
        struct x86_exception exception;
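
The new base3 argument exists because in 64-bit mode the TR base is a full
64-bit address, while desc_struct carries only bits 31:0. A hedged sketch
of how a caller reassembles it (the helper name is illustrative, not from
this series):

#include <asm/desc.h>

static unsigned long example_tr_base(const struct desc_struct *desc, u32 base3)
{
	unsigned long base = get_desc_base(desc);	/* bits 31:0 */

#ifdef CONFIG_X86_64
	base |= (u64)base3 << 32;			/* bits 63:32 */
#endif
	return base;
}
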
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ffd7f8d..c8af099 100644 (file)
@@ -85,7 +85,7 @@
 
 #define ASYNC_PF_PER_VCPU 64
 
-extern spinlock_t kvm_lock;
+extern raw_spinlock_t kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_vcpu;
@@ -255,6 +255,8 @@ struct kvm_mmu {
        int (*sync_page)(struct kvm_vcpu *vcpu,
                         struct kvm_mmu_page *sp);
        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
+       void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+                       u64 *spte, const void *pte, unsigned long mmu_seq);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;
@@ -335,12 +337,6 @@ struct kvm_vcpu_arch {
        u64  *last_pte_updated;
        gfn_t last_pte_gfn;
 
-       struct {
-               gfn_t gfn;      /* presumed gfn during guest pte update */
-               pfn_t pfn;      /* pfn corresponding to that gfn */
-               unsigned long mmu_seq;
-       } update_pte;
-
        struct fpu guest_fpu;
        u64 xcr0;
 
@@ -448,7 +444,7 @@ struct kvm_arch {
 
        unsigned long irq_sources_bitmap;
        s64 kvmclock_offset;
-       spinlock_t tsc_write_lock;
+       raw_spinlock_t tsc_write_lock;
        u64 last_tsc_nsec;
        u64 last_tsc_offset;
        u64 last_tsc_write;
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 823d482..fd5a1f3 100644 (file)
@@ -43,6 +43,7 @@
 
 #define MSR_MTRRcap                    0x000000fe
 #define MSR_IA32_BBL_CR_CTL            0x00000119
+#define MSR_IA32_BBL_CR_CTL3           0x0000011e
 
 #define MSR_IA32_SYSENTER_CS           0x00000174
 #define MSR_IA32_SYSENTER_ESP          0x00000175
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 8dc4466..33c07b0 100644 (file)
@@ -493,7 +493,7 @@ static void __init kvm_smp_prepare_boot_cpu(void)
        native_smp_prepare_boot_cpu();
 }
 
-static void kvm_guest_cpu_online(void *dummy)
+static void __cpuinit kvm_guest_cpu_online(void *dummy)
 {
        kvm_guest_cpu_init();
 }
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index caf9667..0ad47b8 100644 (file)
@@ -76,6 +76,7 @@
 #define Group       (1<<14)     /* Bits 3:5 of modrm byte extend opcode */
 #define GroupDual   (1<<15)     /* Alternate decoding of mod == 3 */
 /* Misc flags */
+#define VendorSpecific (1<<22) /* Vendor specific instruction */
 #define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
 #define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
 #define Undefined   (1<<25) /* No Such Instruction */
@@ -877,7 +878,8 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
        if (selector & 1 << 2) {
                struct desc_struct desc;
                memset (dt, 0, sizeof *dt);
-               if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
+               if (!ops->get_cached_descriptor(&desc, NULL, VCPU_SREG_LDTR,
+                                               ctxt->vcpu))
                        return;
 
                dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
@@ -929,6 +931,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
        return ret;
 }
 
+/* Does not support long mode */
 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   struct x86_emulate_ops *ops,
                                   u16 selector, int seg)
@@ -1040,7 +1043,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
        }
 load:
        ops->set_segment_selector(selector, seg, ctxt->vcpu);
-       ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
+       ops->set_cached_descriptor(&seg_desc, 0, seg, ctxt->vcpu);
        return X86EMUL_CONTINUE;
 exception:
        emulate_exception(ctxt, err_vec, err_code, true);
@@ -1560,7 +1563,7 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
                        struct desc_struct *ss)
 {
        memset(cs, 0, sizeof(struct desc_struct));
-       ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
+       ops->get_cached_descriptor(cs, NULL, VCPU_SREG_CS, ctxt->vcpu);
        memset(ss, 0, sizeof(struct desc_struct));
 
        cs->l = 0;              /* will be adjusted later */
@@ -1607,9 +1610,9 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
                cs.d = 0;
                cs.l = 1;
        }
-       ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+       ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
        ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-       ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+       ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
        ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
        c->regs[VCPU_REGS_RCX] = c->eip;
@@ -1679,9 +1682,9 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
                cs.l = 1;
        }
 
-       ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+       ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
        ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-       ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+       ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
        ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
        ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
@@ -1736,9 +1739,9 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
        cs_sel |= SELECTOR_RPL_MASK;
        ss_sel |= SELECTOR_RPL_MASK;
 
-       ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+       ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
        ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-       ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+       ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
        ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
        c->eip = c->regs[VCPU_REGS_RDX];
@@ -1764,24 +1767,28 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
                                            u16 port, u16 len)
 {
        struct desc_struct tr_seg;
+       u32 base3;
        int r;
-       u16 io_bitmap_ptr;
-       u8 perm, bit_idx = port & 0x7;
+       u16 io_bitmap_ptr, perm, bit_idx = port & 0x7;
        unsigned mask = (1 << len) - 1;
+       unsigned long base;
 
-       ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
+       ops->get_cached_descriptor(&tr_seg, &base3, VCPU_SREG_TR, ctxt->vcpu);
        if (!tr_seg.p)
                return false;
        if (desc_limit_scaled(&tr_seg) < 103)
                return false;
-       r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
-                         ctxt->vcpu, NULL);
+       base = get_desc_base(&tr_seg);
+#ifdef CONFIG_X86_64
+       base |= ((u64)base3) << 32;
+#endif
+       r = ops->read_std(base + 102, &io_bitmap_ptr, 2, ctxt->vcpu, NULL);
        if (r != X86EMUL_CONTINUE)
                return false;
        if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
                return false;
-       r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
-                         &perm, 1, ctxt->vcpu, NULL);
+       r = ops->read_std(base + io_bitmap_ptr + port/8, &perm, 2, ctxt->vcpu,
+                         NULL);
        if (r != X86EMUL_CONTINUE)
                return false;
        if ((perm >> bit_idx) & mask)
@@ -2126,7 +2133,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
        }
 
        ops->set_cr(0,  ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
-       ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
+       ops->set_cached_descriptor(&next_tss_desc, 0, VCPU_SREG_TR, ctxt->vcpu);
        ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
 
        if (has_error_code) {
@@ -2365,7 +2372,8 @@ static struct group_dual group7 = { {
        D(SrcMem16 | ModRM | Mov | Priv),
        D(SrcMem | ModRM | ByteOp | Priv | NoAccess),
 }, {
-       D(SrcNone | ModRM | Priv), N, N, D(SrcNone | ModRM | Priv),
+       D(SrcNone | ModRM | Priv | VendorSpecific), N,
+       N, D(SrcNone | ModRM | Priv | VendorSpecific),
        D(SrcNone | ModRM | DstMem | Mov), N,
        D(SrcMem16 | ModRM | Mov | Priv), N,
 } };
@@ -2489,7 +2497,7 @@ static struct opcode opcode_table[256] = {
 static struct opcode twobyte_table[256] = {
        /* 0x00 - 0x0F */
        N, GD(0, &group7), N, N,
-       N, D(ImplicitOps), D(ImplicitOps | Priv), N,
+       N, D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv), N,
        D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
        N, D(ImplicitOps | ModRM), N, N,
        /* 0x10 - 0x1F */
@@ -2502,7 +2510,8 @@ static struct opcode twobyte_table[256] = {
        /* 0x30 - 0x3F */
        D(ImplicitOps | Priv), I(ImplicitOps, em_rdtsc),
        D(ImplicitOps | Priv), N,
-       D(ImplicitOps), D(ImplicitOps | Priv), N, N,
+       D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific),
+       N, N,
        N, N, N, N, N, N, N, N,
        /* 0x40 - 0x4F */
        X16(D(DstReg | SrcMem | ModRM | Mov)),
@@ -2741,6 +2750,9 @@ done_prefixes:
        if (c->d == 0 || (c->d & Undefined))
                return -1;
 
+       if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
+               return -1;
+
        if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
                c->op_bytes = 8;
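
The two-byte read in the permission-bitmap fix above matters when an access
spans a byte boundary. A self-contained, userspace-style sketch of the same
bitmap arithmetic (illustrative only, not kernel code):

#include <stdbool.h>
#include <stdint.h>

/* A set bit in the TSS I/O permission bitmap means access denied. */
static bool example_io_allowed(const uint8_t *bitmap, uint16_t port, unsigned len)
{
	/* Read 16 bits so a (port, len) access crossing a byte is covered. */
	uint16_t perm = bitmap[port / 8] | (uint16_t)bitmap[port / 8 + 1] << 8;
	unsigned bit_idx = port & 0x7;
	unsigned mask = (1u << len) - 1;

	return ((perm >> bit_idx) & mask) == 0;
}
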
 
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 3cece05..19fe855 100644 (file)
@@ -61,9 +61,6 @@ static void pic_unlock(struct kvm_pic *s)
                        }
                }
 
-               if (!found)
-                       found = s->kvm->bsp_vcpu;
-
                if (!found)
                        return;
 
@@ -75,7 +72,6 @@ static void pic_unlock(struct kvm_pic *s)
 static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
 {
        s->isr &= ~(1 << irq);
-       s->isr_ack |= (1 << irq);
        if (s != &s->pics_state->pics[0])
                irq += 8;
        /*
@@ -89,16 +85,6 @@ static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
        pic_lock(s->pics_state);
 }
 
-void kvm_pic_clear_isr_ack(struct kvm *kvm)
-{
-       struct kvm_pic *s = pic_irqchip(kvm);
-
-       pic_lock(s);
-       s->pics[0].isr_ack = 0xff;
-       s->pics[1].isr_ack = 0xff;
-       pic_unlock(s);
-}
-
 /*
  * set irq level. If an edge is detected, then the IRR is set to 1
  */
@@ -281,7 +267,6 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
        s->irr = 0;
        s->imr = 0;
        s->isr = 0;
-       s->isr_ack = 0xff;
        s->priority_add = 0;
        s->irq_base = 0;
        s->read_reg_select = 0;
@@ -545,15 +530,11 @@ static int picdev_read(struct kvm_io_device *this,
  */
 static void pic_irq_request(struct kvm *kvm, int level)
 {
-       struct kvm_vcpu *vcpu = kvm->bsp_vcpu;
        struct kvm_pic *s = pic_irqchip(kvm);
-       int irq = pic_get_irq(&s->pics[0]);
 
-       s->output = level;
-       if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) {
-               s->pics[0].isr_ack &= ~(1 << irq);
+       if (!s->output)
                s->wakeup_needed = true;
-       }
+       s->output = level;
 }
 
 static const struct kvm_io_device_ops picdev_ops = {
@@ -575,8 +556,6 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
        s->pics[1].elcr_mask = 0xde;
        s->pics[0].pics_state = s;
        s->pics[1].pics_state = s;
-       s->pics[0].isr_ack = 0xff;
-       s->pics[1].isr_ack = 0xff;
 
        /*
         * Initialize PIO device
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 93cf9d0..2b2255b 100644 (file)
@@ -417,10 +417,6 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
        case APIC_DM_INIT:
                if (level) {
                        result = 1;
-                       if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
-                               printk(KERN_DEBUG
-                                      "INIT on a runnable vcpu %d\n",
-                                      vcpu->vcpu_id);
                        vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
@@ -875,8 +871,8 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
 
        hrtimer_cancel(&vcpu->arch.apic->lapic_timer.timer);
 
-       if (vcpu->arch.apic->regs_page)
-               __free_page(vcpu->arch.apic->regs_page);
+       if (vcpu->arch.apic->regs)
+               free_page((unsigned long)vcpu->arch.apic->regs);
 
        kfree(vcpu->arch.apic);
 }
@@ -1065,13 +1061,12 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 
        vcpu->arch.apic = apic;
 
-       apic->regs_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
-       if (apic->regs_page == NULL) {
+       apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!apic->regs) {
                printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
                       vcpu->vcpu_id);
                goto nomem_free_apic;
        }
-       apic->regs = page_address(apic->regs_page);
        apic->vcpu = vcpu;
 
        hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index f5fe32c..52c9e6b 100644 (file)
@@ -13,7 +13,6 @@ struct kvm_lapic {
        u32 divide_count;
        struct kvm_vcpu *vcpu;
        bool irr_pending;
-       struct page *regs_page;
        void *regs;
        gpa_t vapic_addr;
        struct page *vapic_page;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f02b8ed..22fae75 100644 (file)
@@ -111,9 +111,6 @@ module_param(oos_shadow, bool, 0644);
 #define PT64_LEVEL_SHIFT(level) \
                (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
 
-#define PT64_LEVEL_MASK(level) \
-               (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
-
 #define PT64_INDEX(address, level)\
        (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
 
@@ -123,8 +120,6 @@ module_param(oos_shadow, bool, 0644);
 #define PT32_LEVEL_SHIFT(level) \
                (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
 
-#define PT32_LEVEL_MASK(level) \
-               (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
 #define PT32_LVL_OFFSET_MASK(level) \
        (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                                * PT32_LEVEL_BITS))) - 1))
@@ -379,15 +374,15 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
                                       int min)
 {
-       struct page *page;
+       void *page;
 
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               page = alloc_page(GFP_KERNEL);
+               page = (void *)__get_free_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
-               cache->objects[cache->nobjs++] = page_address(page);
+               cache->objects[cache->nobjs++] = page;
        }
        return 0;
 }
@@ -554,13 +549,23 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
        return ret;
 }
 
-static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+static struct kvm_memory_slot *
+gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
+                           bool no_dirty_log)
 {
        struct kvm_memory_slot *slot;
-       slot = gfn_to_memslot(vcpu->kvm, large_gfn);
-       if (slot && slot->dirty_bitmap)
-               return true;
-       return false;
+
+       slot = gfn_to_memslot(vcpu->kvm, gfn);
+       if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
+             (no_dirty_log && slot->dirty_bitmap))
+               slot = NULL;
+
+       return slot;
+}
+
+static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+{
+       return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
 }
 
 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
@@ -1032,9 +1037,9 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
        ASSERT(is_empty_shadow_page(sp->spt));
        hlist_del(&sp->hash_link);
        list_del(&sp->link);
-       __free_page(virt_to_page(sp->spt));
+       free_page((unsigned long)sp->spt);
        if (!sp->role.direct)
-               __free_page(virt_to_page(sp->gfns));
+               free_page((unsigned long)sp->gfns);
        kmem_cache_free(mmu_page_header_cache, sp);
        kvm_mod_used_mmu_pages(kvm, -1);
 }
@@ -1199,6 +1204,13 @@ static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
 }
 
+static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
+                                struct kvm_mmu_page *sp, u64 *spte,
+                                const void *pte, unsigned long mmu_seq)
+{
+       WARN_ON(1);
+}
+
 #define KVM_PAGE_ARRAY_NR 16
 
 struct kvm_mmu_pages {
@@ -2150,26 +2162,13 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
 
-static struct kvm_memory_slot *
-pte_prefetch_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log)
-{
-       struct kvm_memory_slot *slot;
-
-       slot = gfn_to_memslot(vcpu->kvm, gfn);
-       if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
-             (no_dirty_log && slot->dirty_bitmap))
-               slot = NULL;
-
-       return slot;
-}
-
 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
                                     bool no_dirty_log)
 {
        struct kvm_memory_slot *slot;
        unsigned long hva;
 
-       slot = pte_prefetch_gfn_to_memslot(vcpu, gfn, no_dirty_log);
+       slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
        if (!slot) {
                get_page(bad_page);
                return page_to_pfn(bad_page);
@@ -2190,7 +2189,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
        gfn_t gfn;
 
        gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
-       if (!pte_prefetch_gfn_to_memslot(vcpu, gfn, access & ACC_WRITE_MASK))
+       if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK))
                return -1;
 
        ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
@@ -2804,6 +2803,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu,
        context->prefetch_page = nonpaging_prefetch_page;
        context->sync_page = nonpaging_sync_page;
        context->invlpg = nonpaging_invlpg;
+       context->update_pte = nonpaging_update_pte;
        context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
@@ -2933,6 +2933,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
        context->prefetch_page = paging64_prefetch_page;
        context->sync_page = paging64_sync_page;
        context->invlpg = paging64_invlpg;
+       context->update_pte = paging64_update_pte;
        context->free = paging_free;
        context->root_level = level;
        context->shadow_root_level = level;
@@ -2961,6 +2962,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
        context->prefetch_page = paging32_prefetch_page;
        context->sync_page = paging32_sync_page;
        context->invlpg = paging32_invlpg;
+       context->update_pte = paging32_update_pte;
        context->root_level = PT32_ROOT_LEVEL;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
@@ -2985,6 +2987,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
        context->prefetch_page = nonpaging_prefetch_page;
        context->sync_page = nonpaging_sync_page;
        context->invlpg = nonpaging_invlpg;
+       context->update_pte = nonpaging_update_pte;
        context->shadow_root_level = kvm_x86_ops->get_tdp_level();
        context->root_hpa = INVALID_PAGE;
        context->direct_map = true;
@@ -3089,8 +3092,6 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 
 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.update_pte.pfn = bad_pfn;
-
        if (mmu_is_nested(vcpu))
                return init_kvm_nested_mmu(vcpu);
        else if (tdp_enabled)
@@ -3164,7 +3165,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *sp,
                                  u64 *spte,
-                                 const void *new)
+                                 const void *new, unsigned long mmu_seq)
 {
        if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
                ++vcpu->kvm->stat.mmu_pde_zapped;
@@ -3172,10 +3173,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
         }
 
        ++vcpu->kvm->stat.mmu_pte_updated;
-       if (!sp->role.cr4_pae)
-               paging32_update_pte(vcpu, sp, spte, new);
-       else
-               paging64_update_pte(vcpu, sp, spte, new);
+       vcpu->arch.mmu.update_pte(vcpu, sp, spte, new, mmu_seq);
 }
 
 static bool need_remote_flush(u64 old, u64 new)
@@ -3210,28 +3208,6 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
        return !!(spte && (*spte & shadow_accessed_mask));
 }
 
-static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                                         u64 gpte)
-{
-       gfn_t gfn;
-       pfn_t pfn;
-
-       if (!is_present_gpte(gpte))
-               return;
-       gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
-
-       vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
-       smp_rmb();
-       pfn = gfn_to_pfn(vcpu->kvm, gfn);
-
-       if (is_error_pfn(pfn)) {
-               kvm_release_pfn_clean(pfn);
-               return;
-       }
-       vcpu->arch.update_pte.gfn = gfn;
-       vcpu->arch.update_pte.pfn = pfn;
-}
-
 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
        u64 *spte = vcpu->arch.last_pte_updated;
@@ -3253,21 +3229,14 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        struct kvm_mmu_page *sp;
        struct hlist_node *node;
        LIST_HEAD(invalid_list);
-       u64 entry, gentry;
-       u64 *spte;
-       unsigned offset = offset_in_page(gpa);
-       unsigned pte_size;
-       unsigned page_offset;
-       unsigned misaligned;
-       unsigned quadrant;
-       int level;
-       int flooded = 0;
-       int npte;
-       int r;
-       int invlpg_counter;
+       unsigned long mmu_seq;
+       u64 entry, gentry, *spte;
+       unsigned pte_size, page_offset, misaligned, quadrant, offset;
+       int level, npte, invlpg_counter, r, flooded = 0;
        bool remote_flush, local_flush, zap_page;
 
        zap_page = remote_flush = local_flush = false;
+       offset = offset_in_page(gpa);
 
        pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 
@@ -3275,9 +3244,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
        /*
         * Assume that the pte write is on a page table of the same type
-        * as the current vcpu paging mode.  This is nearly always true
-        * (might be false while changing modes).  Note it is verified later
-        * by update_pte().
+        * as the current vcpu paging mode since we update the sptes only
+        * when they have the same mode.
         */
        if ((is_pae(vcpu) && bytes == 4) || !new) {
                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
@@ -3303,15 +3271,17 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                break;
        }
 
-       mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
+       mmu_seq = vcpu->kvm->mmu_notifier_seq;
+       smp_rmb();
+
        spin_lock(&vcpu->kvm->mmu_lock);
        if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
                gentry = 0;
-       kvm_mmu_access_page(vcpu, gfn);
        kvm_mmu_free_some_pages(vcpu);
        ++vcpu->kvm->stat.mmu_pte_write;
        trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
        if (guest_initiated) {
+               kvm_mmu_access_page(vcpu, gfn);
                if (gfn == vcpu->arch.last_pt_write_gfn
                    && !last_updated_pte_accessed(vcpu)) {
                        ++vcpu->arch.last_pt_write_count;
@@ -3375,7 +3345,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                        if (gentry &&
                              !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
                              & mask.word))
-                               mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
+                               mmu_pte_write_new_pte(vcpu, sp, spte, &gentry,
+                                                     mmu_seq);
                        if (!remote_flush && need_remote_flush(entry, *spte))
                                remote_flush = true;
                        ++spte;
@@ -3385,10 +3356,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
        trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
        spin_unlock(&vcpu->kvm->mmu_lock);
-       if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
-               kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
-               vcpu->arch.update_pte.pfn = bad_pfn;
-       }
 }
 
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
@@ -3538,14 +3505,23 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                if (!test_bit(slot, sp->slot_bitmap))
                        continue;
 
-               if (sp->role.level != PT_PAGE_TABLE_LEVEL)
-                       continue;
-
                pt = sp->spt;
-               for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+               for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+                       if (!is_shadow_present_pte(pt[i]) ||
+                             !is_last_spte(pt[i], sp->role.level))
+                               continue;
+
+                       if (is_large_pte(pt[i])) {
+                               drop_spte(kvm, &pt[i],
+                                         shadow_trap_nonpresent_pte);
+                               --kvm->stat.lpages;
+                               continue;
+                       }
+
                        /* avoid RMW */
                        if (is_writable_pte(pt[i]))
                                update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK);
+               }
        }
        kvm_flush_remote_tlbs(kvm);
 }
@@ -3583,7 +3559,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
        if (nr_to_scan == 0)
                goto out;
 
-       spin_lock(&kvm_lock);
+       raw_spin_lock(&kvm_lock);
 
        list_for_each_entry(kvm, &vm_list, vm_list) {
                int idx, freed_pages;
@@ -3606,7 +3582,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
        if (kvm_freed)
                list_move_tail(&kvm_freed->vm_list, &vm_list);
 
-       spin_unlock(&kvm_lock);
+       raw_spin_unlock(&kvm_lock);
 
 out:
        return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
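
The rewritten pte-write path above relies on the standard mmu_notifier
sample-and-retry pattern instead of caching a pfn ahead of time. A hedged
sketch of that pattern (illustrative names, error handling trimmed):

static void example_guarded_update(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	unsigned long mmu_seq = vcpu->kvm->mmu_notifier_seq;
	pfn_t pfn;

	smp_rmb();	/* read the sequence before the (racy) pfn lookup */
	pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (!mmu_notifier_retry(vcpu, mmu_seq)) {
		/* no invalidation ran in between: safe to install the pfn */
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
}
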
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6bccc24..7514050 100644 (file)
@@ -31,7 +31,6 @@
        #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
-       #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
        #define PT_LEVEL_BITS PT64_LEVEL_BITS
        #ifdef CONFIG_X86_64
        #define PT_MAX_FULL_LEVELS 4
@@ -48,7 +47,6 @@
        #define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
-       #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
        #define PT_LEVEL_BITS PT32_LEVEL_BITS
        #define PT_MAX_FULL_LEVELS 2
        #define CMPXCHG cmpxchg
@@ -327,7 +325,7 @@ no_present:
 }
 
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-                             u64 *spte, const void *pte)
+                             u64 *spte, const void *pte, unsigned long mmu_seq)
 {
        pt_element_t gpte;
        unsigned pte_access;
@@ -339,14 +337,14 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 
        pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
        pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
-       if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
+       pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
+       if (is_error_pfn(pfn)) {
+               kvm_release_pfn_clean(pfn);
                return;
-       pfn = vcpu->arch.update_pte.pfn;
-       if (is_error_pfn(pfn))
-               return;
-       if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
+       }
+       if (mmu_notifier_retry(vcpu, mmu_seq))
                return;
-       kvm_get_pfn(pfn);
+
        /*
         * we call mmu_set_spte() with host_writable = true because the
         * pfn was fetched above via a write = 1 lookup (gfn_to_pfn_atomic).
@@ -829,7 +827,6 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 #undef FNAME
 #undef PT_BASE_ADDR_MASK
 #undef PT_INDEX
-#undef PT_LEVEL_MASK
 #undef PT_LVL_ADDR_MASK
 #undef PT_LVL_OFFSET_MASK
 #undef PT_LEVEL_BITS
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 63fec15..6bb15d5 100644 (file)
@@ -135,6 +135,8 @@ struct vcpu_svm {
 
        u32 *msrpm;
 
+       ulong nmi_iret_rip;
+
        struct nested_state nested;
 
        bool nmi_singlestep;
@@ -1153,7 +1155,9 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
        wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
        load_gs_index(svm->host.gs);
 #else
+#ifdef CONFIG_X86_32_LAZY_GS
        loadsegment(gs, svm->host.gs);
+#endif
 #endif
        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
@@ -2653,6 +2657,7 @@ static int iret_interception(struct vcpu_svm *svm)
        ++svm->vcpu.stat.nmi_window_exits;
        clr_intercept(svm, INTERCEPT_IRET);
        svm->vcpu.arch.hflags |= HF_IRET_MASK;
+       svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
        return 1;
 }
 
@@ -3474,7 +3479,12 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 
        svm->int3_injected = 0;
 
-       if (svm->vcpu.arch.hflags & HF_IRET_MASK) {
+       /*
+        * If we've made progress since setting HF_IRET_MASK, we've
+        * executed an IRET and can allow NMI injection.
+        */
+       if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
+           && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
                svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
                kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
        }
@@ -3641,19 +3651,30 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
        loadsegment(fs, svm->host.fs);
+#ifndef CONFIG_X86_32_LAZY_GS
+       loadsegment(gs, svm->host.gs);
+#endif
 #endif
 
        reload_tss(vcpu);
 
        local_irq_disable();
 
-       stgi();
-
        vcpu->arch.cr2 = svm->vmcb->save.cr2;
        vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
        vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
        vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
+       if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
+               kvm_before_handle_nmi(&svm->vcpu);
+
+       stgi();
+
+       /* Any pending NMI will happen here */
+
+       if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
+               kvm_after_handle_nmi(&svm->vcpu);
+
        sync_cr8_to_lapic(vcpu);
 
        svm->next_rip = 0;
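
The nmi_iret_rip bookkeeping above guards against declaring NMIs unmasked
too early: HF_IRET_MASK is set at the IRET intercept, but at that point the
IRET itself has not executed yet. Restated as a hedged predicate (the
helper is illustrative, not from this patch):

static bool example_iret_completed(struct vcpu_svm *svm)
{
	/* progress past the intercepted IRET means it really retired */
	return (svm->vcpu.arch.hflags & HF_IRET_MASK) &&
	       kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip;
}
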
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bf89ec2..5b4cdcb 100644 (file)
@@ -93,14 +93,14 @@ module_param(yield_on_hlt, bool, S_IRUGO);
 * These 2 parameters are used to configure the controls for Pause-Loop Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicates whether PLE is enabled.
- *             According to test, this time is usually small than 41 cycles.
+ *             According to test, this time is usually smaller than 128 cycles.
  * ple_window: upper bound on the amount of time a guest is allowed to execute
  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
  *             less than 2^12 cycles
  * Time is measured based on a counter that runs at the same rate as the TSC,
 * refer to SDM volume 3b, sections 21.6.13 and 22.1.3.
  */
-#define KVM_VMX_DEFAULT_PLE_GAP    41
+#define KVM_VMX_DEFAULT_PLE_GAP    128
 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096
 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
 module_param(ple_gap, int, S_IRUGO);
@@ -176,11 +176,11 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
        return container_of(vcpu, struct vcpu_vmx, vcpu);
 }
 
-static int init_rmode(struct kvm *kvm);
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -1333,19 +1333,25 @@ static __init int vmx_disabled_by_bios(void)
 
        rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
        if (msr & FEATURE_CONTROL_LOCKED) {
+               /* launched w/ TXT and VMX disabled */
                if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
                        && tboot_enabled())
                        return 1;
+               /* launched w/o TXT and VMX only enabled w/ TXT */
                if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
+                       && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
                        && !tboot_enabled()) {
                        printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
-                               " activate TXT before enabling KVM\n");
+                               "activate TXT before enabling KVM\n");
                        return 1;
                }
+               /* launched w/o TXT and VMX disabled */
+               if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
+                       && !tboot_enabled())
+                       return 1;
        }
 
        return 0;
-       /* locked but not enabled */
 }
 
 static void kvm_cpu_vmxon(u64 addr)
@@ -1683,6 +1689,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
        vmx->emulation_required = 1;
        vmx->rmode.vm86_active = 0;
 
+       vmcs_write16(GUEST_TR_SELECTOR, vmx->rmode.tr.selector);
        vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
        vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
        vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
@@ -1756,6 +1763,19 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
        vmx->emulation_required = 1;
        vmx->rmode.vm86_active = 1;
 
+       /*
+        * Very old userspace does not call KVM_SET_TSS_ADDR before entering
+        * vcpu. Call it here with phys address pointing 16M below 4G.
+        */
+       if (!vcpu->kvm->arch.tss_addr) {
+               printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
+                            "called before entering vcpu\n");
+               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+               vmx_set_tss_addr(vcpu->kvm, 0xfeffd000);
+               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       }
+
+       vmx->rmode.tr.selector = vmcs_read16(GUEST_TR_SELECTOR);
        vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
        vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
 
@@ -1794,7 +1814,6 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 
 continue_rmode:
        kvm_mmu_reset_context(vcpu);
-       init_rmode(vcpu->kvm);
 }
 
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
@@ -2030,23 +2049,40 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        vmcs_writel(GUEST_CR4, hw_cr4);
 }
 
-static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
-{
-       struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
-
-       return vmcs_readl(sf->base);
-}
-
 static void vmx_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
 {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+       struct kvm_save_segment *save;
        u32 ar;
 
+       if (vmx->rmode.vm86_active
+           && (seg == VCPU_SREG_TR || seg == VCPU_SREG_ES
+               || seg == VCPU_SREG_DS || seg == VCPU_SREG_FS
+               || seg == VCPU_SREG_GS)
+           && !emulate_invalid_guest_state) {
+               switch (seg) {
+               case VCPU_SREG_TR: save = &vmx->rmode.tr; break;
+               case VCPU_SREG_ES: save = &vmx->rmode.es; break;
+               case VCPU_SREG_DS: save = &vmx->rmode.ds; break;
+               case VCPU_SREG_FS: save = &vmx->rmode.fs; break;
+               case VCPU_SREG_GS: save = &vmx->rmode.gs; break;
+               default: BUG();
+               }
+               var->selector = save->selector;
+               var->base = save->base;
+               var->limit = save->limit;
+               ar = save->ar;
+               if (seg == VCPU_SREG_TR
+                   || var->selector == vmcs_read16(sf->selector))
+                       goto use_saved_rmode_seg;
+       }
        var->base = vmcs_readl(sf->base);
        var->limit = vmcs_read32(sf->limit);
        var->selector = vmcs_read16(sf->selector);
        ar = vmcs_read32(sf->ar_bytes);
+use_saved_rmode_seg:
        if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
                ar = 0;
        var->type = ar & 15;
@@ -2060,6 +2096,18 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
        var->unusable = (ar >> 16) & 1;
 }
 
+static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
+{
+       struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+       struct kvm_segment s;
+
+       if (to_vmx(vcpu)->rmode.vm86_active) {
+               vmx_get_segment(vcpu, &s, seg);
+               return s.base;
+       }
+       return vmcs_readl(sf->base);
+}
+
 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 {
        if (!is_protmode(vcpu))
@@ -2101,6 +2149,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
        u32 ar;
 
        if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
+               vmcs_write16(sf->selector, var->selector);
                vmx->rmode.tr.selector = var->selector;
                vmx->rmode.tr.base = var->base;
                vmx->rmode.tr.limit = var->limit;
@@ -2361,11 +2410,12 @@ static bool guest_state_valid(struct kvm_vcpu *vcpu)
 
 static int init_rmode_tss(struct kvm *kvm)
 {
-       gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
+       gfn_t fn;
        u16 data = 0;
-       int ret = 0;
-       int r;
+       int r, idx, ret = 0;
 
+       idx = srcu_read_lock(&kvm->srcu);
+       fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
        r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
        if (r < 0)
                goto out;
@@ -2389,12 +2439,13 @@ static int init_rmode_tss(struct kvm *kvm)
 
        ret = 1;
 out:
+       srcu_read_unlock(&kvm->srcu, idx);
        return ret;
 }
 
 static int init_rmode_identity_map(struct kvm *kvm)
 {
-       int i, r, ret;
+       int i, idx, r, ret;
        pfn_t identity_map_pfn;
        u32 tmp;
 
@@ -2409,6 +2460,7 @@ static int init_rmode_identity_map(struct kvm *kvm)
                return 1;
        ret = 0;
        identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
+       idx = srcu_read_lock(&kvm->srcu);
        r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
        if (r < 0)
                goto out;
@@ -2424,6 +2476,7 @@ static int init_rmode_identity_map(struct kvm *kvm)
        kvm->arch.ept_identity_pagetable_done = true;
        ret = 1;
 out:
+       srcu_read_unlock(&kvm->srcu, idx);
        return ret;
 }
 
@@ -2699,22 +2752,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        return 0;
 }
 
-static int init_rmode(struct kvm *kvm)
-{
-       int idx, ret = 0;
-
-       idx = srcu_read_lock(&kvm->srcu);
-       if (!init_rmode_tss(kvm))
-               goto exit;
-       if (!init_rmode_identity_map(kvm))
-               goto exit;
-
-       ret = 1;
-exit:
-       srcu_read_unlock(&kvm->srcu, idx);
-       return ret;
-}
-
 static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2722,10 +2759,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
        int ret;
 
        vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
-       if (!init_rmode(vmx->vcpu.kvm)) {
-               ret = -ENOMEM;
-               goto out;
-       }
 
        vmx->rmode.vm86_active = 0;
 
@@ -2805,7 +2838,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
                vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
                if (vm_need_tpr_shadow(vmx->vcpu.kvm))
                        vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
-                               page_to_phys(vmx->vcpu.arch.apic->regs_page));
+                                    __pa(vmx->vcpu.arch.apic->regs));
                vmcs_write32(TPR_THRESHOLD, 0);
        }
 
@@ -2971,6 +3004,9 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
        if (ret)
                return ret;
        kvm->arch.tss_addr = addr;
+       if (!init_rmode_tss(kvm))
+               return -ENOMEM;
+
        return 0;
 }
 
@@ -3962,7 +3998,7 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
 #define Q "l"
 #endif
 
-static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
@@ -3991,6 +4027,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
        asm(
                /* Store host registers */
                "push %%"R"dx; push %%"R"bp;"
+               "push %%"R"cx \n\t" /* placeholder for guest rcx */
                "push %%"R"cx \n\t"
                "cmp %%"R"sp, %c[host_rsp](%0) \n\t"
                "je 1f \n\t"
@@ -4032,10 +4069,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
                ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
                ".Lkvm_vmx_return: "
                /* Save guest registers, load host registers, keep flags */
-               "xchg %0,     (%%"R"sp) \n\t"
+               "mov %0, %c[wordsize](%%"R"sp) \n\t"
+               "pop %0 \n\t"
                "mov %%"R"ax, %c[rax](%0) \n\t"
                "mov %%"R"bx, %c[rbx](%0) \n\t"
-               "push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t"
+               "pop"Q" %c[rcx](%0) \n\t"
                "mov %%"R"dx, %c[rdx](%0) \n\t"
                "mov %%"R"si, %c[rsi](%0) \n\t"
                "mov %%"R"di, %c[rdi](%0) \n\t"
@@ -4053,7 +4091,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
                "mov %%cr2, %%"R"ax   \n\t"
                "mov %%"R"ax, %c[cr2](%0) \n\t"
 
-               "pop  %%"R"bp; pop  %%"R"bp; pop  %%"R"dx \n\t"
+               "pop  %%"R"bp; pop  %%"R"dx \n\t"
                "setbe %c[fail](%0) \n\t"
              : : "c"(vmx), "d"((unsigned long)HOST_RSP),
                [launched]"i"(offsetof(struct vcpu_vmx, launched)),
@@ -4076,7 +4114,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
                [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
                [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
 #endif
-               [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
+               [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
+               [wordsize]"i"(sizeof(ulong))
              : "cc", "memory"
                , R"ax", R"bx", R"di", R"si"
 #ifdef CONFIG_X86_64
@@ -4183,8 +4222,11 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
                if (!kvm->arch.ept_identity_map_addr)
                        kvm->arch.ept_identity_map_addr =
                                VMX_EPT_IDENTITY_PAGETABLE_ADDR;
+               err = -ENOMEM;
                if (alloc_identity_pagetable(kvm) != 0)
                        goto free_vmcs;
+               if (!init_rmode_identity_map(kvm))
+                       goto free_vmcs;
        }
 
        return &vmx->vcpu;
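
The srcu unlock/lock dance in enter_rmode() above follows a general rule:
kvm->slots_lock (taken by vmx_set_tss_addr() via the memory-region path)
can end up in synchronize_srcu(&kvm->srcu), so it must never be acquired
inside an srcu read-side critical section on the same srcu_struct. A hedged
sketch of the pattern (illustrative only):

static void example_slots_update(struct kvm_vcpu *vcpu)
{
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

	mutex_lock(&vcpu->kvm->slots_lock);	/* may synchronize_srcu() */
	/* ... change memory slots ... */
	mutex_unlock(&vcpu->kvm->slots_lock);

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
}
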
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bcc0efc..f1e4025 100644 (file)
  * - enable LME and LMA per default on 64 bit KVM
  */
 #ifdef CONFIG_X86_64
-static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
+static
+u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
 #else
-static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
+static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 #endif
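
As a quick sanity check on the readability change above (EFER bit layout:
SCE = bit 0, LME = bit 8, LMA = bit 10), the symbolic forms equal the old
magic constants. A hedged compile-time check one could drop into any
function (illustrative only, not part of this series):

static inline void example_efer_mask_check(void)
{
	BUILD_BUG_ON(~((u64)(EFER_SCE | EFER_LME | EFER_LMA)) !=
		     0xfffffffffffffafeULL);
	BUILD_BUG_ON(~((u64)EFER_SCE) != 0xfffffffffffffffeULL);
}
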
 
 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
@@ -360,8 +361,8 @@ void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
 {
+       kvm_make_request(KVM_REQ_NMI, vcpu);
        kvm_make_request(KVM_REQ_EVENT, vcpu);
-       vcpu->arch.nmi_pending = 1;
 }
 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
 
@@ -525,8 +526,10 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
        kvm_x86_ops->set_cr0(vcpu, cr0);
 
-       if ((cr0 ^ old_cr0) & X86_CR0_PG)
+       if ((cr0 ^ old_cr0) & X86_CR0_PG) {
                kvm_clear_async_pf_completion_queue(vcpu);
+               kvm_async_pf_hash_reset(vcpu);
+       }
 
        if ((cr0 ^ old_cr0) & update_bits)
                kvm_mmu_reset_context(vcpu);
@@ -1017,7 +1020,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
        unsigned long flags;
        s64 sdiff;
 
-       spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
+       raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
        offset = data - native_read_tsc();
        ns = get_kernel_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
@@ -1050,7 +1053,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
        kvm->arch.last_tsc_write = data;
        kvm->arch.last_tsc_offset = offset;
        kvm_x86_ops->write_tsc_offset(vcpu, offset);
-       spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
+       raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
        /* Reset of TSC must disable overshoot protection below */
        vcpu->arch.hv_clock.tsc_timestamp = 0;
@@ -1453,6 +1456,14 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
        return 0;
 }
 
+static void kvmclock_reset(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.time_page) {
+               kvm_release_page_dirty(vcpu->arch.time_page);
+               vcpu->arch.time_page = NULL;
+       }
+}
+
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
        switch (msr) {
@@ -1510,10 +1521,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                break;
        case MSR_KVM_SYSTEM_TIME_NEW:
        case MSR_KVM_SYSTEM_TIME: {
-               if (vcpu->arch.time_page) {
-                       kvm_release_page_dirty(vcpu->arch.time_page);
-                       vcpu->arch.time_page = NULL;
-               }
+               kvmclock_reset(vcpu);
 
                vcpu->arch.time = data;
                kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -1592,6 +1600,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                } else
                        return set_msr_hyperv(vcpu, msr, data);
                break;
+       case MSR_IA32_BBL_CR_CTL3:
+               /* Drop writes to this legacy MSR -- see rdmsr
+                * counterpart for further detail.
+                */
+               pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
+               break;
        default:
                if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
                        return xen_hvm_config(vcpu, data);
@@ -1846,6 +1860,19 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                } else
                        return get_msr_hyperv(vcpu, msr, pdata);
                break;
+       case MSR_IA32_BBL_CR_CTL3:
+               /* This legacy MSR exists but isn't fully documented for current
+                * silicon.  It is, however, accessed by winxp in very narrow
+                * scenarios where it sets bit #19, itself documented as
+                * a "reserved" bit.  Make a best-effort attempt to return
+                * coherent read data in case the guest interprets the rest
+                * of the register:
+                *
+                * L2 cache control register 3: 64GB range, 256KB size,
+                * enabled, latency 0x1, configured
+                */
+               data = 0xbe702111;
+               break;
        default:
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
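
For comparison, the stubbed value can be checked against real hardware from
userspace via the msr driver.  A hedged sketch: it assumes the 'msr' module
is loaded, root privileges, and that 0x11e is MSR_IA32_BBL_CR_CTL3's index
(per msr-index.h); the pread() simply fails on CPUs lacking this legacy MSR.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/cpu/0/msr");
                return 1;
        }
        /* the msr driver maps the file offset to the MSR index */
        if (pread(fd, &val, sizeof(val), 0x11e) != sizeof(val)) {
                perror("rdmsr 0x11e");
                close(fd);
                return 1;
        }
        printf("BBL_CR_CTL3 = %#jx\n", (uintmax_t)val);
        close(fd);
        return 0;
}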
@@ -2100,8 +2127,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                if (check_tsc_unstable()) {
                        kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
                        vcpu->arch.tsc_catchup = 1;
-                       kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                }
+               kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                if (vcpu->cpu != cpu)
                        kvm_migrate_timers(vcpu);
                vcpu->cpu = cpu;
@@ -2575,9 +2602,6 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
        if (mce->status & MCI_STATUS_UC) {
                if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
                    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
-                       printk(KERN_DEBUG "kvm: set_mce: "
-                              "injects mce exception while "
-                              "previous one is in progress!\n");
                        kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
                        return 0;
                }
@@ -2648,8 +2672,6 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
        vcpu->arch.interrupt.pending = events->interrupt.injected;
        vcpu->arch.interrupt.nr = events->interrupt.nr;
        vcpu->arch.interrupt.soft = events->interrupt.soft;
-       if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
-               kvm_pic_clear_isr_ack(vcpu->kvm);
        if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
                kvm_x86_ops->set_interrupt_shadow(vcpu,
                                                  events->interrupt.shadow);
@@ -4140,8 +4162,8 @@ static unsigned long emulator_get_cached_segment_base(int seg,
        return get_segment_base(vcpu, seg);
 }
 
-static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
-                                          struct kvm_vcpu *vcpu)
+static bool emulator_get_cached_descriptor(struct desc_struct *desc, u32 *base3,
+                                          int seg, struct kvm_vcpu *vcpu)
 {
        struct kvm_segment var;
 
@@ -4154,6 +4176,10 @@ static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
                var.limit >>= 12;
        set_desc_limit(desc, var.limit);
        set_desc_base(desc, (unsigned long)var.base);
+#ifdef CONFIG_X86_64
+       if (base3)
+               *base3 = var.base >> 32;
+#endif
        desc->type = var.type;
        desc->s = var.s;
        desc->dpl = var.dpl;
@@ -4166,8 +4192,8 @@ static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
        return true;
 }
 
-static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
-                                          struct kvm_vcpu *vcpu)
+static void emulator_set_cached_descriptor(struct desc_struct *desc, u32 base3,
+                                          int seg, struct kvm_vcpu *vcpu)
 {
        struct kvm_segment var;
 
@@ -4175,6 +4201,9 @@ static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
        kvm_get_segment(vcpu, &var, seg);
 
        var.base = get_desc_base(desc);
+#ifdef CONFIG_X86_64
+       var.base |= ((u64)base3) << 32;
+#endif
        var.limit = get_desc_limit(desc);
        if (desc->g)
                var.limit = (var.limit << 12) | 0xfff;
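
The base3 plumbing in the two hunks above exists because 64-bit system
descriptors (TSS, LDT) carry a 64-bit base while struct desc_struct only
holds the low 32 bits, so the high half has to travel separately.  A small
illustrative model of the split and rejoin (hypothetical type, not KVM's):

#include <stdint.h>

struct desc_model {
        uint32_t base_lo;               /* stand-in for desc_struct's base */
};

/* get path: low half into the descriptor, high half into *base3 */
static void split_base(uint64_t base, struct desc_model *d, uint32_t *base3)
{
        d->base_lo = (uint32_t)base;
        if (base3)
                *base3 = (uint32_t)(base >> 32);
}

/* set path: mirrors var.base |= ((u64)base3) << 32 in the hunk above */
static uint64_t join_base(const struct desc_model *d, uint32_t base3)
{
        return ((uint64_t)base3 << 32) | d->base_lo;
}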
@@ -4390,41 +4419,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                vcpu->arch.emulate_ctxt.have_exception = false;
                vcpu->arch.emulate_ctxt.perm_ok = false;
 
+               vcpu->arch.emulate_ctxt.only_vendor_specific_insn
+                       = emulation_type & EMULTYPE_TRAP_UD;
+
                r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
-               if (r == X86EMUL_PROPAGATE_FAULT)
-                       goto done;
 
                trace_kvm_emulate_insn_start(vcpu);
-
-               /* Only allow emulation of specific instructions on #UD
-                * (namely VMMCALL, sysenter, sysexit, syscall)*/
-               if (emulation_type & EMULTYPE_TRAP_UD) {
-                       if (!c->twobyte)
-                               return EMULATE_FAIL;
-                       switch (c->b) {
-                       case 0x01: /* VMMCALL */
-                               if (c->modrm_mod != 3 || c->modrm_rm != 1)
-                                       return EMULATE_FAIL;
-                               break;
-                       case 0x34: /* sysenter */
-                       case 0x35: /* sysexit */
-                               if (c->modrm_mod != 0 || c->modrm_rm != 0)
-                                       return EMULATE_FAIL;
-                               break;
-                       case 0x05: /* syscall */
-                               if (c->modrm_mod != 0 || c->modrm_rm != 0)
-                                       return EMULATE_FAIL;
-                               break;
-                       default:
-                               return EMULATE_FAIL;
-                       }
-
-                       if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
-                               return EMULATE_FAIL;
-               }
-
                ++vcpu->stat.insn_emulation;
                if (r)  {
+                       if (emulation_type & EMULTYPE_TRAP_UD)
+                               return EMULATE_FAIL;
                        if (reexecute_instruction(vcpu, cr2))
                                return EMULATE_DONE;
                        if (emulation_type & EMULTYPE_SKIP)
@@ -4452,7 +4456,6 @@ restart:
                return handle_emulation_failure(vcpu);
        }
 
-done:
        if (vcpu->arch.emulate_ctxt.have_exception) {
                inject_emulated_exception(vcpu);
                r = EMULATE_DONE;
@@ -4562,7 +4565,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 
        smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
 
-       spin_lock(&kvm_lock);
+       raw_spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (vcpu->cpu != freq->cpu)
@@ -4572,7 +4575,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
                                send_ipi = 1;
                }
        }
-       spin_unlock(&kvm_lock);
+       raw_spin_unlock(&kvm_lock);
 
        if (freq->old < freq->new && send_ipi) {
                /*
@@ -5185,6 +5188,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        r = 1;
                        goto out;
                }
+               if (kvm_check_request(KVM_REQ_NMI, vcpu))
+                       vcpu->arch.nmi_pending = true;
        }
 
        r = kvm_mmu_reload(vcpu);
@@ -5213,14 +5218,18 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                kvm_load_guest_fpu(vcpu);
        kvm_load_guest_xcr0(vcpu);
 
-       atomic_set(&vcpu->guest_mode, 1);
-       smp_wmb();
+       vcpu->mode = IN_GUEST_MODE;
+
+       /* Set ->mode before we check ->requests; see the
+        * comment in make_all_cpus_request.
+        */
+       smp_mb();
 
        local_irq_disable();
 
-       if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
+       if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
            || need_resched() || signal_pending(current)) {
-               atomic_set(&vcpu->guest_mode, 0);
+               vcpu->mode = OUTSIDE_GUEST_MODE;
                smp_wmb();
                local_irq_enable();
                preempt_enable();
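
The smp_mb() the new comment refers to pairs with the barrier in
make_all_cpus_request(): each side publishes its own write before reading
the other's, a store-buffer (Dekker-style) pattern that guarantees at least
one side observes the other.  A minimal C11 model of the pairing, with
illustrative names (seq_cst fences stand in for the kernel's smp_mb()):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int mode;                 /* 0 = outside, 1 = in guest mode */
static atomic_ulong requests;

/* vCPU side, as in vcpu_enter_guest(): publish mode, then look for
 * requests; abort guest entry if any are pending */
static bool vcpu_may_enter(void)
{
        atomic_store(&mode, 1);
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        return atomic_load(&requests) == 0;
}

/* requester side, as in make_all_cpus_request(): publish the request,
 * then read mode to decide whether an IPI is needed */
static bool requester_needs_ipi(void)
{
        atomic_fetch_or(&requests, 1UL);
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        return atomic_load(&mode) == 1;
}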
@@ -5256,7 +5265,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc);
 
-       atomic_set(&vcpu->guest_mode, 0);
+       vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
        local_irq_enable();
 
@@ -5574,7 +5583,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
 {
        int mmu_reset_needed = 0;
-       int pending_vec, max_bits;
+       int pending_vec, max_bits, idx;
        struct desc_ptr dt;
 
        dt.size = sregs->idt.limit;
@@ -5603,10 +5612,13 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
        if (sregs->cr4 & X86_CR4_OSXSAVE)
                update_cpuid(vcpu);
+
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (!is_long_mode(vcpu) && is_pae(vcpu)) {
                load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
                mmu_reset_needed = 1;
        }
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
        if (mmu_reset_needed)
                kvm_mmu_reset_context(vcpu);
@@ -5617,8 +5629,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        if (pending_vec < max_bits) {
                kvm_queue_interrupt(vcpu, pending_vec, false);
                pr_debug("Set back pending irq %d\n", pending_vec);
-               if (irqchip_in_kernel(vcpu->kvm))
-                       kvm_pic_clear_isr_ack(vcpu->kvm);
        }
 
        kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
@@ -5814,10 +5824,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->arch.time_page) {
-               kvm_release_page_dirty(vcpu->arch.time_page);
-               vcpu->arch.time_page = NULL;
-       }
+       kvmclock_reset(vcpu);
 
        free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
        fx_free(vcpu);
@@ -5878,6 +5885,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
        kvm_make_request(KVM_REQ_EVENT, vcpu);
        vcpu->arch.apf.msr_val = 0;
 
+       kvmclock_reset(vcpu);
+
        kvm_clear_async_pf_completion_queue(vcpu);
        kvm_async_pf_hash_reset(vcpu);
        vcpu->arch.apf.halted = false;
@@ -6005,7 +6014,7 @@ int kvm_arch_init_vm(struct kvm *kvm)
        /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
        set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
 
-       spin_lock_init(&kvm->arch.tsc_write_lock);
+       raw_spin_lock_init(&kvm->arch.tsc_write_lock);
 
        return 0;
 }
@@ -6103,7 +6112,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                                int user_alloc)
 {
 
-       int npages = mem->memory_size >> PAGE_SHIFT;
+       int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
 
        if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
                int ret;
@@ -6118,12 +6127,12 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                               "failed to munmap memory\n");
        }
 
+       if (!kvm->arch.n_requested_mmu_pages)
+               nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
+
        spin_lock(&kvm->mmu_lock);
-       if (!kvm->arch.n_requested_mmu_pages) {
-               unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
+       if (nr_mmu_pages)
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
-       }
-
        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        spin_unlock(&kvm->mmu_lock);
 }
@@ -6157,7 +6166,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 
        me = get_cpu();
        if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
-               if (atomic_xchg(&vcpu->guest_mode, 0))
+               if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
                        smp_send_reschedule(cpu);
        put_cpu();
 }
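
The kick now hinges on the cmpxchg inside kvm_vcpu_exiting_guest_mode()
(added to kvm_host.h below): only the caller that actually moves the vCPU
from IN_GUEST_MODE to EXITING_GUEST_MODE sends the reschedule IPI, so
concurrent kickers collapse into a single interrupt.  A hedged C11 sketch
of just that transition:

#include <stdatomic.h>
#include <stdbool.h>

enum { OUTSIDE_GUEST = 0, IN_GUEST = 1, EXITING_GUEST = 2 };

static atomic_int mode;

/* true only for the one caller that wins the IN_GUEST -> EXITING_GUEST
 * transition; everyone else sees a different old value and stays quiet */
static bool should_send_ipi(void)
{
        int expected = IN_GUEST;

        atomic_compare_exchange_strong(&mode, &expected, EXITING_GUEST);
        return expected == IN_GUEST;    /* unchanged iff the CAS succeeded */
}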
index bab9f74..cfed539 100644 (file)
@@ -53,8 +53,8 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages,
 }
 
 /* call with current->mm->mmap_sem held */
-static int __get_user_pages(unsigned long start_page, size_t num_pages,
-                       struct page **p, struct vm_area_struct **vma)
+static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
+                                 struct page **p, struct vm_area_struct **vma)
 {
        unsigned long lock_limit;
        size_t got;
@@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
 
        down_write(&current->mm->mmap_sem);
 
-       ret = __get_user_pages(start_page, num_pages, p, NULL);
+       ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);
 
        up_write(&current->mm->mmap_sem);
 
index d7a26c1..7689e49 100644 (file)
@@ -51,8 +51,8 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
 /*
  * Call with current->mm->mmap_sem held.
  */
-static int __get_user_pages(unsigned long start_page, size_t num_pages,
-                           struct page **p, struct vm_area_struct **vma)
+static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
+                               struct page **p, struct vm_area_struct **vma)
 {
        unsigned long lock_limit;
        size_t got;
@@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 
        down_write(&current->mm->mmap_sem);
 
-       ret = __get_user_pages(start_page, num_pages, p, NULL);
+       ret = __qib_get_user_pages(start_page, num_pages, p, NULL);
 
        up_write(&current->mm->mmap_sem);
 
index 28cc03b..a1331ce 100644 (file)
 
 #define ERFKILL                132     /* Operation not possible due to RF-kill */
 
+#define EHWPOISON      133     /* Memory page has hardware error */
+
 #endif
index b5021db..ab42855 100644 (file)
@@ -43,6 +43,7 @@
 #define KVM_REQ_DEACTIVATE_FPU    10
 #define KVM_REQ_EVENT             11
 #define KVM_REQ_APF_HALT          12
+#define KVM_REQ_NMI               13
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID    0
 
@@ -98,23 +99,31 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
 
+enum {
+       OUTSIDE_GUEST_MODE,
+       IN_GUEST_MODE,
+       EXITING_GUEST_MODE
+};
+
 struct kvm_vcpu {
        struct kvm *kvm;
 #ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
 #endif
+       int cpu;
        int vcpu_id;
-       struct mutex mutex;
-       int   cpu;
-       atomic_t guest_mode;
-       struct kvm_run *run;
+       int srcu_idx;
+       int mode;
        unsigned long requests;
        unsigned long guest_debug;
-       int srcu_idx;
+
+       struct mutex mutex;
+       struct kvm_run *run;
 
        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
+       struct pid *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;
@@ -140,6 +149,11 @@ struct kvm_vcpu {
        struct kvm_vcpu_arch arch;
 };
 
+static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
+{
+       return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
+}
+
 /*
  * Some of the bitops functions do not support too long bitmaps.
  * This number must be determined not to exceed such limits.
@@ -212,7 +226,6 @@ struct kvm_memslots {
 
 struct kvm {
        spinlock_t mmu_lock;
-       raw_spinlock_t requests_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
@@ -223,6 +236,7 @@ struct kvm {
 #endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
+       int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
@@ -719,11 +733,6 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
        set_bit(req, &vcpu->requests);
 }
 
-static inline bool kvm_make_check_request(int req, struct kvm_vcpu *vcpu)
-{
-       return test_and_set_bit(req, &vcpu->requests);
-}
-
 static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 {
        if (test_bit(req, &vcpu->requests)) {
index ff83798..581703d 100644 (file)
@@ -972,6 +972,10 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+                    unsigned long start, int len, unsigned int foll_flags,
+                    struct page **pages, struct vm_area_struct **vmas,
+                    int *nonblocking);
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        unsigned long start, int nr_pages, int write, int force,
                        struct page **pages, struct vm_area_struct **vmas);
@@ -1535,6 +1539,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_FORCE     0x10    /* get_user_pages read/write w/o permission */
 #define FOLL_MLOCK     0x40    /* mark page as mlocked */
 #define FOLL_SPLIT     0x80    /* don't return transhuge pages, split them */
+#define FOLL_HWPOISON  0x100   /* check page is hwpoisoned */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
@@ -1627,14 +1632,6 @@ extern int sysctl_memory_failure_recovery;
 extern void shake_page(struct page *p, int access);
 extern atomic_long_t mce_bad_pages;
 extern int soft_offline_page(struct page *page, int flags);
-#ifdef CONFIG_MEMORY_FAILURE
-int is_hwpoison_address(unsigned long addr);
-#else
-static inline int is_hwpoison_address(unsigned long addr)
-{
-       return 0;
-}
-#endif
 
 extern void dump_page(struct page *page);
 
index 25e4291..05b92c4 100644 (file)
@@ -193,6 +193,7 @@ void __put_task_struct(struct task_struct *tsk)
        if (!profile_handoff_task(tsk))
                free_task(tsk);
 }
+EXPORT_SYMBOL_GPL(__put_task_struct);
 
 /*
  * macro override instead of weak attribute alias, to workaround
index 39b65b6..02f2212 100644 (file)
@@ -435,6 +435,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
        rcu_read_unlock();
        return pid;
 }
+EXPORT_SYMBOL_GPL(get_task_pid);
 
 struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
 {
@@ -446,6 +447,7 @@ struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
        rcu_read_unlock();
        return result;
 }
+EXPORT_SYMBOL_GPL(get_pid_task);
 
 struct pid *find_get_pid(pid_t nr)
 {
index 6948820..3438dd4 100644 (file)
@@ -245,11 +245,6 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
 }
 #endif /* CONFIG_SPARSEMEM */
 
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                    unsigned long start, int len, unsigned int foll_flags,
-                    struct page **pages, struct vm_area_struct **vmas,
-                    int *nonblocking);
-
 #define ZONE_RECLAIM_NOSCAN    -2
 #define ZONE_RECLAIM_FULL      -1
 #define ZONE_RECLAIM_SOME      0
index 0207c2f..99ccb44 100644 (file)
@@ -1487,35 +1487,3 @@ done:
        /* keep elevated page count for bad page */
        return ret;
 }
-
-/*
- * The caller must hold current->mm->mmap_sem in read mode.
- */
-int is_hwpoison_address(unsigned long addr)
-{
-       pgd_t *pgdp;
-       pud_t pud, *pudp;
-       pmd_t pmd, *pmdp;
-       pte_t pte, *ptep;
-       swp_entry_t entry;
-
-       pgdp = pgd_offset(current->mm, addr);
-       if (!pgd_present(*pgdp))
-               return 0;
-       pudp = pud_offset(pgdp, addr);
-       pud = *pudp;
-       if (!pud_present(pud) || pud_large(pud))
-               return 0;
-       pmdp = pmd_offset(pudp, addr);
-       pmd = *pmdp;
-       if (!pmd_present(pmd) || pmd_large(pmd))
-               return 0;
-       ptep = pte_offset_map(pmdp, addr);
-       pte = *ptep;
-       pte_unmap(ptep);
-       if (!is_swap_pte(pte))
-               return 0;
-       entry = pte_to_swp_entry(pte);
-       return is_hwpoison_entry(entry);
-}
-EXPORT_SYMBOL_GPL(is_hwpoison_address);
index 5823698..346ee7e 100644 (file)
@@ -1410,6 +1410,55 @@ no_page_table:
        return page;
 }
 
+/**
+ * __get_user_pages() - pin user pages in memory
+ * @tsk:       task_struct of target task
+ * @mm:                mm_struct of target mm
+ * @start:     starting user address
+ * @nr_pages:  number of pages from start to pin
+ * @gup_flags: flags modifying pin behaviour
+ * @pages:     array that receives pointers to the pages pinned.
+ *             Should be at least nr_pages long. Or NULL, if caller
+ *             only intends to ensure the pages are faulted in.
+ * @vmas:      array of pointers to vmas corresponding to each page.
+ *             Or NULL if the caller does not require them.
+ * @nonblocking: if non-NULL, don't wait for disk IO or mmap_sem contention
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno. Each page returned must be released
+ * with a put_page() call when it is finished with. vmas will only
+ * remain valid while mmap_sem is held.
+ *
+ * Must be called with mmap_sem held for read or write.
+ *
+ * __get_user_pages walks a process's page tables and takes a reference to
+ * each struct page that each user address corresponds to at a given
+ * instant. That is, it takes the page that would be accessed if a user
+ * thread accesses the given user virtual address at that instant.
+ *
+ * This does not guarantee that the page exists in the user mappings when
+ * __get_user_pages returns, and there may even be a completely different
+ * page there in some cases (e.g. if mmapped pagecache has been invalidated
+ * and subsequently re-faulted). However, it does guarantee that the page
+ * won't be freed completely. Mostly, callers simply care that the page
+ * contains data that was valid *at some point in time*. Typically, an IO
+ * or similar operation cannot guarantee anything stronger anyway because
+ * locks can't be held over the syscall boundary.
+ *
+ * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
+ * the page is written to, set_page_dirty (or set_page_dirty_lock, as
+ * appropriate) must be called after the page is finished with, and
+ * before put_page is called.
+ *
+ * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
+ * or mmap_sem contention, and if waiting is needed to pin all pages,
+ * *@nonblocking will be set to 0.
+ *
+ * In most cases, get_user_pages or get_user_pages_fast should be used
+ * instead of __get_user_pages. __get_user_pages should be used only if
+ * you need some special @gup_flags.
+ */
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                     unsigned long start, int nr_pages, unsigned int gup_flags,
                     struct page **pages, struct vm_area_struct **vmas,
@@ -1527,9 +1576,16 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                if (ret & VM_FAULT_ERROR) {
                                        if (ret & VM_FAULT_OOM)
                                                return i ? i : -ENOMEM;
-                                       if (ret &
-                                           (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE|
-                                            VM_FAULT_SIGBUS))
+                                       if (ret & (VM_FAULT_HWPOISON |
+                                                  VM_FAULT_HWPOISON_LARGE)) {
+                                               if (i)
+                                                       return i;
+                                               else if (gup_flags & FOLL_HWPOISON)
+                                                       return -EHWPOISON;
+                                               else
+                                                       return -EFAULT;
+                                       }
+                                       if (ret & VM_FAULT_SIGBUS)
                                                return i ? i : -EFAULT;
                                        BUG();
                                }
@@ -1578,6 +1634,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        } while (nr_pages);
        return i;
 }
+EXPORT_SYMBOL(__get_user_pages);
 
 /**
  * get_user_pages() - pin user pages in memory
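
A hedged in-kernel usage sketch following the rules the kerneldoc above
spells out -- take mmap_sem around the call, dirty written pages, then drop
the reference.  The helper name and calling context are hypothetical:

/* assumes <linux/mm.h> and a valid tsk->mm; pins one page for writing */
static int pin_touch_release(struct task_struct *tsk, unsigned long addr)
{
        struct page *page;
        int ret;

        down_read(&tsk->mm->mmap_sem);
        ret = __get_user_pages(tsk, tsk->mm, addr, 1,
                               FOLL_WRITE | FOLL_TOUCH, &page, NULL, NULL);
        up_read(&tsk->mm->mmap_sem);
        if (ret != 1)
                return ret < 0 ? ret : -EFAULT;

        /* ... write to the page, e.g. via kmap()/kunmap() ... */

        set_page_dirty_lock(page);      /* after writing, before put_page() */
        put_page(page);
        return 0;
}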
index 2ca4535..3656849 100644 (file)
@@ -313,8 +313,9 @@ kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
                if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) {
                        /*
                         * This rcu_assign_pointer is needed for when
-                        * another thread calls kvm_irqfd_update before
-                        * we flush workqueue below.
+                        * another thread calls kvm_irq_routing_update before
+                        * we flush the workqueue below (we synchronize with
+                        * kvm_irq_routing_update using irqfds.lock).
                         * It is paired with synchronize_rcu done by caller
                         * of that function.
                         */
index f29abeb..1fa0d29 100644 (file)
@@ -69,7 +69,7 @@ MODULE_LICENSE("GPL");
  *             kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_RAW_SPINLOCK(kvm_lock);
 LIST_HEAD(vm_list);
 
 static cpumask_var_t cpus_hardware_enabled;
@@ -137,6 +137,14 @@ void vcpu_load(struct kvm_vcpu *vcpu)
        int cpu;
 
        mutex_lock(&vcpu->mutex);
+       if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
+               /* The thread running this VCPU changed. */
+               struct pid *oldpid = vcpu->pid;
+               struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
+               rcu_assign_pointer(vcpu->pid, newpid);
+               synchronize_rcu();
+               put_pid(oldpid);
+       }
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
@@ -165,13 +173,16 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 
        zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
-       raw_spin_lock(&kvm->requests_lock);
-       me = smp_processor_id();
+       me = get_cpu();
        kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (kvm_make_check_request(req, vcpu))
-                       continue;
+               kvm_make_request(req, vcpu);
                cpu = vcpu->cpu;
-               if (cpus != NULL && cpu != -1 && cpu != me)
+
+               /* Set ->requests bit before we read ->mode */
+               smp_mb();
+
+               if (cpus != NULL && cpu != -1 && cpu != me &&
+                     kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
                        cpumask_set_cpu(cpu, cpus);
        }
        if (unlikely(cpus == NULL))
@@ -180,7 +191,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
                smp_call_function_many(cpus, ack_flush, NULL, 1);
        else
                called = false;
-       raw_spin_unlock(&kvm->requests_lock);
+       put_cpu();
        free_cpumask_var(cpus);
        return called;
 }
@@ -209,6 +220,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
+       vcpu->pid = NULL;
        init_waitqueue_head(&vcpu->wq);
        kvm_async_pf_vcpu_init(vcpu);
 
@@ -233,6 +245,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_init);
 
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
+       put_pid(vcpu->pid);
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
 }
@@ -463,15 +476,14 @@ static struct kvm *kvm_create_vm(void)
        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        spin_lock_init(&kvm->mmu_lock);
-       raw_spin_lock_init(&kvm->requests_lock);
        kvm_eventfd_init(kvm);
        mutex_init(&kvm->lock);
        mutex_init(&kvm->irq_lock);
        mutex_init(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);
-       spin_lock(&kvm_lock);
+       raw_spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
-       spin_unlock(&kvm_lock);
+       raw_spin_unlock(&kvm_lock);
 
        return kvm;
 
@@ -544,9 +556,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
        struct mm_struct *mm = kvm->mm;
 
        kvm_arch_sync_events(kvm);
-       spin_lock(&kvm_lock);
+       raw_spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
-       spin_unlock(&kvm_lock);
+       raw_spin_unlock(&kvm_lock);
        kvm_free_irq_routing(kvm);
        for (i = 0; i < KVM_NR_BUSES; i++)
                kvm_io_bus_destroy(kvm->buses[i]);
@@ -588,6 +600,7 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
        return 0;
 }
 
+#ifndef CONFIG_S390
 /*
  * Allocation size is twice as large as the actual dirty bitmap size.
  * This makes it possible to do double buffering: see x86's
@@ -608,6 +621,7 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
        memslot->dirty_bitmap_head = memslot->dirty_bitmap;
        return 0;
 }
+#endif /* !CONFIG_S390 */
 
 /*
  * Allocate some memory and give it an address in the guest physical address
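
The double buffering mentioned in the comment above amounts to allocating
twice the bitmap and flipping which half is live, so one half can be
drained while new dirty bits land in the other.  A minimal illustrative
model (hypothetical names, not KVM's dirty_bitmap/dirty_bitmap_head API):

#include <stddef.h>
#include <string.h>

struct dirty_log {
        unsigned long *head;    /* single allocation of 2 * n words */
        unsigned long *live;    /* half currently collecting dirty bits */
        size_t n;               /* words per half */
};

/* hand the filled half to the caller for draining and restart the
 * other half clean */
static unsigned long *dirty_log_flip(struct dirty_log *d)
{
        unsigned long *full = d->live;

        d->live = (d->live == d->head) ? d->head + d->n : d->head;
        memset(d->live, 0, d->n * sizeof(*d->live));
        return full;
}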
@@ -621,7 +635,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc)
 {
-       int r, flush_shadow = 0;
+       int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
@@ -741,8 +755,6 @@ skip_lpage:
                if (kvm_create_dirty_bitmap(&new) < 0)
                        goto out_free;
                /* destroy any largepage mappings for dirty tracking */
-               if (old.npages)
-                       flush_shadow = 1;
        }
 #else  /* not defined CONFIG_S390 */
        new.user_alloc = user_alloc;
@@ -813,9 +825,6 @@ skip_lpage:
        kvm_free_physmem_slot(&old, &new);
        kfree(old_memslots);
 
-       if (flush_shadow)
-               kvm_arch_flush_shadow(kvm);
-
        return 0;
 
 out_free:
@@ -1029,6 +1038,15 @@ static pfn_t get_fault_pfn(void)
        return fault_pfn;
 }
 
+static inline int check_user_page_hwpoison(unsigned long addr)
+{
+       int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
+
+       rc = __get_user_pages(current, current->mm, addr, 1,
+                             flags, NULL, NULL, NULL);
+       return rc == -EHWPOISON;
+}
+
 static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
                        bool *async, bool write_fault, bool *writable)
 {
@@ -1076,7 +1094,7 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
                        return get_fault_pfn();
 
                down_read(&current->mm->mmap_sem);
-               if (is_hwpoison_address(addr)) {
+               if (check_user_page_hwpoison(addr)) {
                        up_read(&current->mm->mmap_sem);
                        get_page(hwpoison_page);
                        return page_to_pfn(hwpoison_page);
@@ -1466,18 +1484,55 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
+void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
-       ktime_t expires;
-       DEFINE_WAIT(wait);
-
-       prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
-
-       /* Sleep for 100 us, and hope lock-holder got scheduled */
-       expires = ktime_add_ns(ktime_get(), 100000UL);
-       schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
+       struct kvm *kvm = me->kvm;
+       struct kvm_vcpu *vcpu;
+       int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
+       int yielded = 0;
+       int pass;
+       int i;
 
-       finish_wait(&vcpu->wq, &wait);
+       /*
+        * We boost the priority of a VCPU that is runnable but not
+        * currently running, because it got preempted by something
+        * else and called schedule in __vcpu_run.  Hopefully that
+        * VCPU is holding the lock that we need and will release it.
+        * We approximate round-robin by starting at the last boosted VCPU.
+        */
+       for (pass = 0; pass < 2 && !yielded; pass++) {
+               kvm_for_each_vcpu(i, vcpu, kvm) {
+                       struct task_struct *task = NULL;
+                       struct pid *pid;
+                       if (!pass && i < last_boosted_vcpu) {
+                               i = last_boosted_vcpu;
+                               continue;
+                       } else if (pass && i > last_boosted_vcpu)
+                               break;
+                       if (vcpu == me)
+                               continue;
+                       if (waitqueue_active(&vcpu->wq))
+                               continue;
+                       rcu_read_lock();
+                       pid = rcu_dereference(vcpu->pid);
+                       if (pid)
+                               task = get_pid_task(pid, PIDTYPE_PID);
+                       rcu_read_unlock();
+                       if (!task)
+                               continue;
+                       if (task->flags & PF_VCPU) {
+                               put_task_struct(task);
+                               continue;
+                       }
+                       if (yield_to(task, 1)) {
+                               put_task_struct(task);
+                               kvm->last_boosted_vcpu = i;
+                               yielded = 1;
+                               break;
+                       }
+                       put_task_struct(task);
+               }
+       }
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
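
The two-pass loop above visits every other vCPU exactly once, resuming just
past last_boosted_vcpu and wrapping around, and stops at the first candidate
that accepts the yield.  The same control flow in isolation, with the
eligibility and yield_to() checks abstracted into a callback (illustrative):

#include <stdbool.h>

/* returns the index boosted, or -1; try_yield() stands in for the
 * waitqueue, PF_VCPU and yield_to() checks in kvm_vcpu_on_spin() */
static int pick_boost_target(bool (*try_yield)(int), int n, int last_boosted)
{
        int pass, i;

        for (pass = 0; pass < 2; pass++) {
                for (i = 0; i < n; i++) {
                        if (!pass && i < last_boosted) {
                                i = last_boosted;  /* pass 0: resume after it */
                                continue;
                        } else if (pass && i > last_boosted)
                                break;             /* pass 1: wrapped around */
                        if (try_yield(i))
                                return i;          /* the new last_boosted */
                }
        }
        return -1;
}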
 
@@ -2122,9 +2177,9 @@ static void hardware_enable_nolock(void *junk)
 
 static void hardware_enable(void *junk)
 {
-       spin_lock(&kvm_lock);
+       raw_spin_lock(&kvm_lock);
        hardware_enable_nolock(junk);
-       spin_unlock(&kvm_lock);
+       raw_spin_unlock(&kvm_lock);
 }
 
 static void hardware_disable_nolock(void *junk)
@@ -2139,9 +2194,9 @@ static void hardware_disable_nolock(void *junk)
 
 static void hardware_disable(void *junk)
 {
-       spin_lock(&kvm_lock);
+       raw_spin_lock(&kvm_lock);
        hardware_disable_nolock(junk);
-       spin_unlock(&kvm_lock);
+       raw_spin_unlock(&kvm_lock);
 }
 
 static void hardware_disable_all_nolock(void)
@@ -2155,16 +2210,16 @@ static void hardware_disable_all_nolock(void)
 
 static void hardware_disable_all(void)
 {
-       spin_lock(&kvm_lock);
+       raw_spin_lock(&kvm_lock);
        hardware_disable_all_nolock();
-       spin_unlock(&kvm_lock);
+       raw_spin_unlock(&kvm_lock);
 }
 
 static int hardware_enable_all(void)
 {
        int r = 0;
 
-       spin_lock(&kvm_lock);
+       raw_spin_lock(&kvm_lock);
 
        kvm_usage_count++;
        if (kvm_usage_count == 1) {
@@ -2177,7 +2232,7 @@ static int hardware_enable_all(void)
                }
        }
 
-       spin_unlock(&kvm_lock);
+       raw_spin_unlock(&kvm_lock);
 
        return r;
 }
@@ -2339,10 +2394,10 @@ static int vm_stat_get(void *_offset, u64 *val)
        struct kvm *kvm;
 
        *val = 0;
-       spin_lock(&kvm_lock);
+       raw_spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                *val += *(u32 *)((void *)kvm + offset);
-       spin_unlock(&kvm_lock);
+       raw_spin_unlock(&kvm_lock);
        return 0;
 }
 
@@ -2356,12 +2411,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
        int i;
 
        *val = 0;
-       spin_lock(&kvm_lock);
+       raw_spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                kvm_for_each_vcpu(i, vcpu, kvm)
                        *val += *(u32 *)((void *)vcpu + offset);
 
-       spin_unlock(&kvm_lock);
+       raw_spin_unlock(&kvm_lock);
        return 0;
 }
 
@@ -2402,7 +2457,7 @@ static int kvm_suspend(struct sys_device *dev, pm_message_t state)
 static int kvm_resume(struct sys_device *dev)
 {
        if (kvm_usage_count) {
-               WARN_ON(spin_is_locked(&kvm_lock));
+               WARN_ON(raw_spin_is_locked(&kvm_lock));
                hardware_enable_nolock(NULL);
        }
        return 0;