KVM: MMU: rename 'reset_host_protection' to 'host_writable'
author Lai Jiangshan <laijs@cn.fujitsu.com>
Fri, 19 Nov 2010 09:03:22 +0000 (17:03 +0800)
committer Avi Kivity <avi@redhat.com>
Wed, 12 Jan 2011 09:29:46 +0000 (11:29 +0200)
Rename it to better reflect its meaning: the flag says whether the host
mapping of the page is writable.
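
The flag is passed straight down to set_spte(), where it gates the
SPTE_HOST_WRITEABLE bit, so the renamed parameter reads directly from the
code that consumes it:

	if (host_writable)
		spte |= SPTE_HOST_WRITEABLE;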

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 29b2ec4..5910492 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1958,7 +1958,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                    unsigned pte_access, int user_fault,
                    int write_fault, int dirty, int level,
                    gfn_t gfn, pfn_t pfn, bool speculative,
-                   bool can_unsync, bool reset_host_protection)
+                   bool can_unsync, bool host_writable)
 {
        u64 spte, entry = *sptep;
        int ret = 0;
@@ -1985,7 +1985,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
                        kvm_is_mmio_pfn(pfn));
 
-       if (reset_host_protection)
+       if (host_writable)
                spte |= SPTE_HOST_WRITEABLE;
 
        spte |= (u64)pfn << PAGE_SHIFT;
@@ -2048,7 +2048,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                         int user_fault, int write_fault, int dirty,
                         int *ptwrite, int level, gfn_t gfn,
                         pfn_t pfn, bool speculative,
-                        bool reset_host_protection)
+                        bool host_writable)
 {
        int was_rmapped = 0;
        int rmap_count;
@@ -2083,7 +2083,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
        if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
                      dirty, level, gfn, pfn, speculative, true,
-                     reset_host_protection)) {
+                     host_writable)) {
                if (write_fault)
                        *ptwrite = 1;
                kvm_mmu_flush_tlb(vcpu);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ca0e5e8..57619ed 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -329,7 +329,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                return;
        kvm_get_pfn(pfn);
        /*
-        * we call mmu_set_spte() with reset_host_protection = true beacuse that
+        * we call mmu_set_spte() with host_writable = true because
         * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
         */
        mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
@@ -744,7 +744,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                            bool clear_unsync)
 {
        int i, offset, nr_present;
-       bool reset_host_protection;
+       bool host_writable;
        gpa_t first_pte_gpa;
 
        offset = nr_present = 0;
@@ -794,14 +794,14 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
                if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
                        pte_access &= ~ACC_WRITE_MASK;
-                       reset_host_protection = 0;
+                       host_writable = false;
                } else {
-                       reset_host_protection = 1;
+                       host_writable = true;
                }
                set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
                         is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
                         spte_to_pfn(sp->spt[i]), true, false,
-                        reset_host_protection);
+                        host_writable);
        }
 
        return !nr_present;