KVM: MMU: Fix and clean up for_each_gfn_* macros
author Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Wed, 6 Mar 2013 07:05:07 +0000 (16:05 +0900)
committer Marcelo Tosatti <mtosatti@redhat.com>
Thu, 7 Mar 2013 20:26:27 +0000 (17:26 -0300)
The member access (sp)->gfn must not be expanded through the macro
parameter @gfn: the preprocessor substitutes parameter tokens even
after "->", so these macros only work when the caller's argument is
literally named gfn.  No current user passes anything else, but this
should be fixed before anyone runs into strange errors.  Renaming the
parameters to _kvm/_sp/_gfn avoids the collision, and
for_each_gfn_indirect_valid_sp() is now built on top of
for_each_gfn_sp().
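
A standalone sketch of the problem (not kernel code; the struct, macro
and variable names below are hypothetical, chosen only to demonstrate
the preprocessor behaviour):

#include <stdio.h>

struct demo_sp { unsigned long gfn; };

/* BROKEN: the parameter is named "gfn", so the preprocessor also
 * substitutes the token after "->".  The macro therefore only compiles
 * when the caller's argument is a variable literally named gfn. */
#define demo_match_bad(sp, gfn)    ((sp)->gfn == (gfn))

/* FIXED: an underscore-prefixed parameter cannot collide with the
 * member name, mirroring the _kvm/_sp/_gfn rename in this patch. */
#define demo_match_good(sp, _gfn)  ((sp)->gfn == (_gfn))

int main(void)
{
	struct demo_sp sp = { .gfn = 42 };
	unsigned long wanted = 42;

	/* demo_match_bad(&sp, wanted) would expand to
	 * ((&sp)->wanted == (wanted)) and fail to compile. */
	printf("%d\n", demo_match_good(&sp, wanted));
	return 0;
}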

Note: ignored the following checkpatch errors:
  ERROR: Macros with complex values should be enclosed in parenthesis
  ERROR: trailing statements should be on next line
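
Both errors come from the trailing "if (cond) {} else" in each macro:
the empty if-body filters out non-matching entries, and the statement
written after the macro invocation becomes the else branch, so callers
can use the macro like an ordinary for loop.  A minimal standalone
sketch of the same pattern (hypothetical names, not kernel code):

#include <stdio.h>

/* Same shape as for_each_gfn_sp(): iterate, then filter with an empty
 * if-body so that the statement following the macro becomes the else
 * branch and runs only for entries that pass the filter. */
#define for_each_even(i, n)				\
	for ((i) = 0; (i) < (n); (i)++)			\
		if ((i) % 2 != 0) {} else

int main(void)
{
	int i;

	for_each_even(i, 10)
		printf("%d ", i);	/* prints: 0 2 4 6 8 */
	printf("\n");
	return 0;
}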

Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
arch/x86/kvm/mmu.c

index 956ca35..3e4822b 100644
@@ -1644,16 +1644,14 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list);
 
-#define for_each_gfn_sp(kvm, sp, gfn)                                  \
-  hlist_for_each_entry(sp,                                             \
-   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)  \
-       if ((sp)->gfn != (gfn)) {} else
-
-#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn)                   \
-  hlist_for_each_entry(sp,                                             \
-   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)  \
-               if ((sp)->gfn != (gfn) || (sp)->role.direct ||          \
-                       (sp)->role.invalid) {} else
+#define for_each_gfn_sp(_kvm, _sp, _gfn)                               \
+       hlist_for_each_entry(_sp,                                       \
+         &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
+               if ((_sp)->gfn != (_gfn)) {} else
+
+#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)                        \
+       for_each_gfn_sp(_kvm, _sp, _gfn)                                \
+               if ((_sp)->role.direct || (_sp)->role.invalid) {} else
 
 /* @sp->gfn should be write-protected at the call site */
 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,