KVM: MMU: trace mmio page fault
author    Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Mon, 11 Jul 2011 19:34:24 +0000 (03:34 +0800)
committer Avi Kivity <avi@redhat.com>
Sun, 24 Jul 2011 08:50:41 +0000 (11:50 +0300)
Add tracepoints to trace mmio page faults

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/mmutrace.h
arch/x86/kvm/trace.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4e22df6..9335e1b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -211,6 +211,7 @@ static void mark_mmio_spte(u64 *sptep, u64 gfn, unsigned access)
 {
        access &= ACC_WRITE_MASK | ACC_USER_MASK;
 
+       trace_mark_mmio_spte(sptep, gfn, access);
        mmu_spte_set(sptep, shadow_mmio_mask | access | gfn << PAGE_SHIFT);
 }
 
@@ -1940,6 +1941,8 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                kvm_mmu_isolate_pages(invalid_list);
                sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
                list_del_init(invalid_list);
+
+               trace_kvm_mmu_delay_free_pages(sp);
                call_rcu(&sp->rcu, free_pages_rcu);
                return;
        }
@@ -2938,6 +2941,8 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 
                if (direct)
                        addr = 0;
+
+               trace_handle_mmio_page_fault(addr, gfn, access);
                vcpu_cache_mmio_info(vcpu, addr, gfn, access);
                return 1;
        }
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index b60b4fd..eed67f3 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -196,6 +196,54 @@ DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
        TP_ARGS(sp)
 );
 
+DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_delay_free_pages,
+       TP_PROTO(struct kvm_mmu_page *sp),
+
+       TP_ARGS(sp)
+);
+
+TRACE_EVENT(
+       mark_mmio_spte,
+       TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
+       TP_ARGS(sptep, gfn, access),
+
+       TP_STRUCT__entry(
+               __field(void *, sptep)
+               __field(gfn_t, gfn)
+               __field(unsigned, access)
+       ),
+
+       TP_fast_assign(
+               __entry->sptep = sptep;
+               __entry->gfn = gfn;
+               __entry->access = access;
+       ),
+
+       TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
+                 __entry->access)
+);
+
+TRACE_EVENT(
+       handle_mmio_page_fault,
+       TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
+       TP_ARGS(addr, gfn, access),
+
+       TP_STRUCT__entry(
+               __field(u64, addr)
+               __field(gfn_t, gfn)
+               __field(unsigned, access)
+       ),
+
+       TP_fast_assign(
+               __entry->addr = addr;
+               __entry->gfn = gfn;
+               __entry->access = access;
+       ),
+
+       TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
+                 __entry->access)
+);
+
 TRACE_EVENT(
        kvm_mmu_audit,
        TP_PROTO(struct kvm_vcpu *vcpu, int audit_point),
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 624f8cb..3ff898c 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -698,6 +698,29 @@ TRACE_EVENT(kvm_emulate_insn,
 #define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
 #define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
 
+TRACE_EVENT(
+       vcpu_match_mmio,
+       TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
+       TP_ARGS(gva, gpa, write, gpa_match),
+
+       TP_STRUCT__entry(
+               __field(gva_t, gva)
+               __field(gpa_t, gpa)
+               __field(bool, write)
+               __field(bool, gpa_match)
+               ),
+
+       TP_fast_assign(
+               __entry->gva = gva;
+               __entry->gpa = gpa;
+               __entry->write = write;
+               __entry->gpa_match = gpa_match;
+               ),
+
+       TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
+                 __entry->write ? "Write" : "Read",
+                 __entry->gpa_match ? "GPA" : "GVA")
+);
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2c9661f..84a28ea 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4021,6 +4021,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
                  vcpu->arch.access)) {
                *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
                                        (gva & (PAGE_SIZE - 1));
+               trace_vcpu_match_mmio(gva, *gpa, write, false);
                return 1;
        }
 
@@ -4036,8 +4037,10 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
        if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
                return 1;
 
-       if (vcpu_match_mmio_gpa(vcpu, *gpa))
+       if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
+               trace_vcpu_match_mmio(gva, *gpa, write, true);
                return 1;
+       }
 
        return 0;
 }
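
For reference, not part of the patch: the three events added in mmutrace.h are registered under the kvmmmu trace system and vcpu_match_mmio under kvm, so they can be consumed through the usual tracing debugfs interface. Below is a minimal userspace sketch that enables them and streams trace_pipe; it assumes debugfs is mounted at /sys/kernel/debug, CONFIG_TRACING is enabled, and the program runs as root. The event paths are inferred from the TRACE_SYSTEM names rather than taken from this patch.

/*
 * Minimal sketch (assumptions noted above): enable the new tracepoints
 * and stream them from trace_pipe.
 */
#include <stdio.h>

static void enable_event(const char *path)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return;
	}
	fputs("1", f);		/* equivalent to "echo 1 > .../enable" */
	fclose(f);
}

int main(void)
{
	static const char * const events[] = {
		"/sys/kernel/debug/tracing/events/kvmmmu/mark_mmio_spte/enable",
		"/sys/kernel/debug/tracing/events/kvmmmu/handle_mmio_page_fault/enable",
		"/sys/kernel/debug/tracing/events/kvmmmu/kvm_mmu_delay_free_pages/enable",
		"/sys/kernel/debug/tracing/events/kvm/vcpu_match_mmio/enable",
	};
	char line[1024];
	FILE *tp;
	size_t i;

	for (i = 0; i < sizeof(events) / sizeof(events[0]); i++)
		enable_event(events[i]);

	tp = fopen("/sys/kernel/debug/tracing/trace_pipe", "r");
	if (!tp) {
		perror("trace_pipe");
		return 1;
	}
	/* Each guest MMIO fault now emits lines in the TP_printk formats above. */
	while (fgets(line, sizeof(line), tp))
		fputs(line, stdout);
	fclose(tp);
	return 0;
}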