KVM: MMU: Fix SMEP failure during fetch
author    Yang, Wei Y <wei.y.yang@intel.com>
Tue, 9 Aug 2011 10:14:01 +0000 (18:14 +0800)
committer Avi Kivity <avi@redhat.com>
Sun, 25 Sep 2011 16:18:02 +0000 (19:18 +0300)
This patch fixes kvm-unit-tests hanging and the PT_ACCESSED_MASK bit
being set incorrectly in the case of an SMEP fault: the walker updated
'eperm' only after the variable had already been checked, so the
accessed bit could be set on a walk that SMEP was about to deny.
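
For illustration, a minimal sketch of the ordering before and after the
change (simplified from FNAME(walk_addr_generic); mark_accessed(),
cr4_smep, is_last_gpte() and gpte_access() below are shorthand for the
real helpers, not the actual identifiers):

        /* Old order (simplified): the accessed-bit update tests eperm
         * before the SMEP check below gets a chance to set it. */
        if (!eperm && !(pte & PT_ACCESSED_MASK))
                mark_accessed(pte);             /* A bit set too eagerly */
        ...
        if (is_last_gpte(pte)) {
                if ((pte_access & PT_USER_MASK) && cr4_smep &&
                    fetch_fault && !user_fault)
                        eperm = true;           /* too late for the test above */
                ...
        }

        /* New order: compute pte_access and decide the SMEP case first. */
        last_gpte = is_last_gpte(pte);
        if (last_gpte) {
                pte_access = pt_access & gpte_access(pte);
                if ((pte_access & PT_USER_MASK) && cr4_smep &&
                    fetch_fault && !user_fault)
                        eperm = true;
        }
        if (!eperm && !(pte & PT_ACCESSED_MASK))
                mark_accessed(pte);             /* skipped when SMEP denies the fetch */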

Signed-off-by: Yang, Wei <wei.y.yang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/paging_tmpl.h

index f6dd9fe..9299410 100644
@@ -147,7 +147,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
        gfn_t table_gfn;
        unsigned index, pt_access, uninitialized_var(pte_access);
        gpa_t pte_gpa;
-       bool eperm;
+       bool eperm, last_gpte;
        int offset;
        const int write_fault = access & PFERR_WRITE_MASK;
        const int user_fault  = access & PFERR_USER_MASK;
@@ -221,6 +221,17 @@ retry_walk:
                        eperm = true;
 #endif
 
+               last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
+               if (last_gpte) {
+                       pte_access = pt_access &
+                                    FNAME(gpte_access)(vcpu, pte, true);
+                       /* check if the kernel is fetching from user page */
+                       if (unlikely(pte_access & PT_USER_MASK) &&
+                           kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
+                               if (fetch_fault && !user_fault)
+                                       eperm = true;
+               }
+
                if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
                        int ret;
                        trace_kvm_mmu_set_accessed_bit(table_gfn, index,
@@ -238,18 +249,12 @@ retry_walk:
 
                walker->ptes[walker->level - 1] = pte;
 
-               if (FNAME(is_last_gpte)(walker, vcpu, mmu, pte)) {
+               if (last_gpte) {
                        int lvl = walker->level;
                        gpa_t real_gpa;
                        gfn_t gfn;
                        u32 ac;
 
-                       /* check if the kernel is fetching from user page */
-                       if (unlikely(pte_access & PT_USER_MASK) &&
-                           kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
-                               if (fetch_fault && !user_fault)
-                                       eperm = true;
-
                        gfn = gpte_to_gfn_lvl(pte, lvl);
                        gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;
 
@@ -295,7 +300,6 @@ retry_walk:
                walker->ptes[walker->level - 1] = pte;
        }
 
-       pte_access = pt_access & FNAME(gpte_access)(vcpu, pte, true);
        walker->pt_access = pt_access;
        walker->pte_access = pte_access;
        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",