perf: Default PMU ops
author	Peter Zijlstra <a.p.zijlstra@chello.nl>
Tue, 15 Jun 2010 10:22:39 +0000 (12:22 +0200)
committer	Ingo Molnar <mingo@elte.hu>
Thu, 9 Sep 2010 18:46:30 +0000 (20:46 +0200)
Provide default implementations for the pmu txn methods; this
allows us to remove some conditional code.
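
A minimal sketch of the pattern (illustrative only, names are
hypothetical, not the exact kernel code): any callback left NULL by
the driver is filled in with a no-op stub at registration time, so
the call sites can invoke it unconditionally:

	/* Illustrative sketch of the "default ops" pattern. */
	static void nop_void(struct pmu *pmu)
	{
		/* default: nothing to do */
	}

	/* at registration time */
	if (!pmu->start_txn)
		pmu->start_txn = nop_void;

	/* at the call site, no NULL check is needed anymore */
	pmu->start_txn(pmu);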

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/perf_event.h
kernel/perf_event.c

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6abf103..bf85733 100644
@@ -565,8 +565,8 @@ struct pmu {
 
        int                             *pmu_disable_count;
 
-       void (*pmu_enable)              (struct pmu *pmu);
-       void (*pmu_disable)             (struct pmu *pmu);
+       void (*pmu_enable)              (struct pmu *pmu); /* optional */
+       void (*pmu_disable)             (struct pmu *pmu); /* optional */
 
        /*
         * Should return -ENOENT when the @event doesn't match this PMU.
@@ -590,19 +590,19 @@ struct pmu {
         * Start the transaction, after this ->enable() doesn't need to
         * do schedulability tests.
         */
-       void (*start_txn)       (struct pmu *pmu);
+       void (*start_txn)       (struct pmu *pmu); /* optional */
        /*
         * If ->start_txn() disabled the ->enable() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         */
-       int  (*commit_txn)      (struct pmu *pmu);
+       int  (*commit_txn)      (struct pmu *pmu); /* optional */
        /*
         * Will cancel the transaction, assumes ->disable() is called
         * for each successfull ->enable() during the transaction.
         */
-       void (*cancel_txn)      (struct pmu *pmu);
+       void (*cancel_txn)      (struct pmu *pmu); /* optional */
 };
 
 /**
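
With the optional markers above, a driver only has to supply the
mandatory callbacks and may leave the rest NULL; perf_pmu_register()
(see the kernel/perf_event.c hunks below) installs the defaults. A
hedged sketch of a hardware PMU that provides pmu_enable/pmu_disable
but no transaction methods (driver names are hypothetical, and fields
not shown in the hunk above may differ in the real struct):

	/* Hypothetical hardware PMU: batching hooks, but no txn methods. */
	static struct pmu my_hw_pmu = {
		.pmu_enable	= my_hw_pmu_enable,	/* write out accumulated state */
		.pmu_disable	= my_hw_pmu_disable,	/* stop the counters */
		.event_init	= my_hw_event_init,	/* -ENOENT if the event isn't ours */
		/*
		 * start_txn/commit_txn/cancel_txn left NULL:
		 * perf_pmu_register() points them at the
		 * perf_pmu_{start,commit,cancel}_txn wrappers, so group
		 * scheduling is bracketed by pmu_disable()/pmu_enable().
		 */
	};

	perf_pmu_register(&my_hw_pmu);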
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 5ed0c06..8ef4ba3 100644
@@ -674,21 +674,14 @@ group_sched_in(struct perf_event *group_event,
 {
        struct perf_event *event, *partial_group = NULL;
        struct pmu *pmu = group_event->pmu;
-       bool txn = false;
 
        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;
 
-       /* Check if group transaction availabe */
-       if (pmu->start_txn)
-               txn = true;
-
-       if (txn)
-               pmu->start_txn(pmu);
+       pmu->start_txn(pmu);
 
        if (event_sched_in(group_event, cpuctx, ctx)) {
-               if (txn)
-                       pmu->cancel_txn(pmu);
+               pmu->cancel_txn(pmu);
                return -EAGAIN;
        }
 
@@ -702,7 +695,7 @@ group_sched_in(struct perf_event *group_event,
                }
        }
 
-       if (!txn || !pmu->commit_txn(pmu))
+       if (!pmu->commit_txn(pmu))
                return 0;
 
 group_error:
@@ -717,8 +710,7 @@ group_error:
        }
        event_sched_out(group_event, cpuctx, ctx);
 
-       if (txn)
-               pmu->cancel_txn(pmu);
+       pmu->cancel_txn(pmu);
 
        return -EAGAIN;
 }
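
With the defaults guaranteed to exist, the transaction protocol in
group_sched_in() reduces to the unconditional shape below (a condensed
restatement of the hunk above, sibling handling elided):

	pmu->start_txn(pmu);		/* no-op, or perf_pmu_disable() */

	/* ... event_sched_in() the leader and each sibling ... */

	if (!pmu->commit_txn(pmu))	/* the default stub always returns 0 */
		return 0;

	/* something failed: undo and close the transaction */
	pmu->cancel_txn(pmu);
	return -EAGAIN;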
@@ -4965,6 +4957,31 @@ static LIST_HEAD(pmus);
 static DEFINE_MUTEX(pmus_lock);
 static struct srcu_struct pmus_srcu;
 
+static void perf_pmu_nop_void(struct pmu *pmu)
+{
+}
+
+static int perf_pmu_nop_int(struct pmu *pmu)
+{
+       return 0;
+}
+
+static void perf_pmu_start_txn(struct pmu *pmu)
+{
+       perf_pmu_disable(pmu);
+}
+
+static int perf_pmu_commit_txn(struct pmu *pmu)
+{
+       perf_pmu_enable(pmu);
+       return 0;
+}
+
+static void perf_pmu_cancel_txn(struct pmu *pmu)
+{
+       perf_pmu_enable(pmu);
+}
+
 int perf_pmu_register(struct pmu *pmu)
 {
        int ret;
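
The perf_pmu_start_txn()/perf_pmu_commit_txn()/perf_pmu_cancel_txn()
defaults above lean on perf_pmu_disable()/perf_pmu_enable(), which were
introduced earlier in this series and are not part of this patch.
Roughly (a sketch from memory, assuming the refcount semantics of the
per-cpu pmu_disable_count), only the outermost disable/enable pair
reaches the hardware:

	void perf_pmu_disable(struct pmu *pmu)
	{
		int *count = this_cpu_ptr(pmu->pmu_disable_count);

		if (!(*count)++)
			pmu->pmu_disable(pmu);
	}

	void perf_pmu_enable(struct pmu *pmu)
	{
		int *count = this_cpu_ptr(pmu->pmu_disable_count);

		if (!--(*count))
			pmu->pmu_enable(pmu);
	}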
@@ -4974,6 +4991,29 @@ int perf_pmu_register(struct pmu *pmu)
        pmu->pmu_disable_count = alloc_percpu(int);
        if (!pmu->pmu_disable_count)
                goto unlock;
+
+       if (!pmu->start_txn) {
+               if (pmu->pmu_enable) {
+                       /*
+                        * If we have pmu_enable/pmu_disable calls, install
+                        * transaction stubs that use that to try and batch
+                        * hardware accesses.
+                        */
+                       pmu->start_txn  = perf_pmu_start_txn;
+                       pmu->commit_txn = perf_pmu_commit_txn;
+                       pmu->cancel_txn = perf_pmu_cancel_txn;
+               } else {
+                       pmu->start_txn  = perf_pmu_nop_void;
+                       pmu->commit_txn = perf_pmu_nop_int;
+                       pmu->cancel_txn = perf_pmu_nop_void;
+               }
+       }
+
+       if (!pmu->pmu_enable) {
+               pmu->pmu_enable  = perf_pmu_nop_void;
+               pmu->pmu_disable = perf_pmu_nop_void;
+       }
+
        list_add_rcu(&pmu->entry, &pmus);
        ret = 0;
 unlock:
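
For completeness, a hedged sketch of what the fallback selection above
means for a purely software PMU that provides none of the optional
callbacks (names are hypothetical):

	/* Hypothetical software PMU: only the mandatory callbacks. */
	static struct pmu my_sw_pmu = {
		.event_init	= my_sw_event_init,
		/* all optional callbacks left NULL */
	};

	/*
	 * After perf_pmu_register(&my_sw_pmu):
	 *   .start_txn   == perf_pmu_nop_void
	 *   .commit_txn  == perf_pmu_nop_int   (always reports success)
	 *   .cancel_txn  == perf_pmu_nop_void
	 *   .pmu_enable  == perf_pmu_nop_void
	 *   .pmu_disable == perf_pmu_nop_void
	 */
	perf_pmu_register(&my_sw_pmu);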