rcu: Instrument synchronize_rcu_expedited() for debugfs tracing
author		Paul E. McKenney <paul.mckenney@linaro.org>
		Thu, 11 Oct 2012 23:18:09 +0000 (16:18 -0700)
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>
		Thu, 8 Nov 2012 19:50:13 +0000 (11:50 -0800)
This commit adds counters to the rcu_state structure and updates them
in synchronize_sched_expedited() to provide the data needed for debugfs
tracing.
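
The debugfs output itself lands in a separate tracing patch.  As a rough
sketch of how such a dump might look (the handler name, output format,
and direct use of rcu_sched_state are illustrative assumptions, not the
actual tracing interface):

	#include <linux/seq_file.h>

	/* Illustrative only: dump the new counters through a seq_file. */
	static int show_rcuexp(struct seq_file *m, void *unused)
	{
		struct rcu_state *rsp = &rcu_sched_state; /* assumed visible */

		seq_printf(m, "wrap=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
			   atomic_long_read(&rsp->expedited_wrap),
			   atomic_long_read(&rsp->expedited_tryfail),
			   atomic_long_read(&rsp->expedited_workdone1),
			   atomic_long_read(&rsp->expedited_workdone2),
			   atomic_long_read(&rsp->expedited_normal),
			   atomic_long_read(&rsp->expedited_stoppedcpus),
			   atomic_long_read(&rsp->expedited_done_tries),
			   atomic_long_read(&rsp->expedited_done_lost),
			   atomic_long_read(&rsp->expedited_done_exit));
		return 0;
	}

Because each field is an atomic_long_t that is only ever incremented,
the reader needs no locking; momentary skew between fields is harmless
for tracing.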

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcutree.c
kernel/rcutree.h

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 3c72e5e..b966d56 100644
@@ -2321,6 +2321,7 @@ void synchronize_sched_expedited(void)
                         (ulong)atomic_long_read(&rsp->expedited_done) +
                         ULONG_MAX / 8)) {
                synchronize_sched();
+               atomic_long_inc(&rsp->expedited_wrap);
                return;
        }
 
@@ -2341,11 +2342,14 @@ void synchronize_sched_expedited(void)
                             synchronize_sched_expedited_cpu_stop,
                             NULL) == -EAGAIN) {
                put_online_cpus();
+               atomic_long_inc(&rsp->expedited_tryfail);
 
                /* Check to see if someone else did our work for us. */
                s = atomic_long_read(&rsp->expedited_done);
                if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
-                       smp_mb(); /* ensure test happens before caller kfree */
+                       /* ensure test happens before caller kfree */
+                       smp_mb__before_atomic_inc(); /* ^^^ */
+                       atomic_long_inc(&rsp->expedited_workdone1);
                        return;
                }
 
@@ -2354,13 +2358,16 @@ void synchronize_sched_expedited(void)
                        udelay(trycount * num_online_cpus());
                } else {
                        synchronize_sched();
+                       atomic_long_inc(&rsp->expedited_normal);
                        return;
                }
 
                /* Recheck to see if someone else did our work for us. */
                s = atomic_long_read(&rsp->expedited_done);
                if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
-                       smp_mb(); /* ensure test happens before caller kfree */
+                       /* ensure test happens before caller kfree */
+                       smp_mb__before_atomic_inc(); /* ^^^ */
+                       atomic_long_inc(&rsp->expedited_workdone2);
                        return;
                }
 
@@ -2375,6 +2382,7 @@ void synchronize_sched_expedited(void)
                snap = atomic_long_read(&rsp->expedited_start);
                smp_mb(); /* ensure read is before try_stop_cpus(). */
        }
+       atomic_long_inc(&rsp->expedited_stoppedcpus);
 
        /*
         * Everyone up to our most recent fetch is covered by our grace
@@ -2383,12 +2391,16 @@ void synchronize_sched_expedited(void)
         * than we did already did their update.
         */
        do {
+               atomic_long_inc(&rsp->expedited_done_tries);
                s = atomic_long_read(&rsp->expedited_done);
                if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
-                       smp_mb(); /* ensure test happens before caller kfree */
+                       /* ensure test happens before caller kfree */
+                       smp_mb__before_atomic_inc(); /* ^^^ */
+                       atomic_long_inc(&rsp->expedited_done_lost);
                        break;
                }
        } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
+       atomic_long_inc(&rsp->expedited_done_exit);
 
        put_online_cpus();
 }
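
The conversion from smp_mb() to smp_mb__before_atomic_inc() on the
early-exit paths above does not weaken ordering: the test of
->expedited_done must still happen before the caller's kfree(), but
because each exit now ends in an atomic increment, the barrier-plus-
increment pair supplies the required full ordering.  A sketch of the
idiom (taken from the hunk above, not new code):

	s = atomic_long_read(&rsp->expedited_done);
	if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
		/* Barrier + atomic increment together order the test
		 * before the caller's kfree(), like a full smp_mb(). */
		smp_mb__before_atomic_inc();
		atomic_long_inc(&rsp->expedited_done_lost);
		break;
	}

On architectures where atomic read-modify-write operations are already
fully ordered (x86, for example), smp_mb__before_atomic_inc() reduces to
a compiler barrier, so the instrumentation avoids adding a second
hardware barrier to these paths.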
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 88f3d9d..d274af3 100644
@@ -406,6 +406,15 @@ struct rcu_state {
 
        atomic_long_t expedited_start;          /* Starting ticket. */
        atomic_long_t expedited_done;           /* Done ticket. */
+       atomic_long_t expedited_wrap;           /* # near-wrap incidents. */
+       atomic_long_t expedited_tryfail;        /* # acquisition failures. */
+       atomic_long_t expedited_workdone1;      /* # done by others #1. */
+       atomic_long_t expedited_workdone2;      /* # done by others #2. */
+       atomic_long_t expedited_normal;         /* # fallbacks to normal. */
+       atomic_long_t expedited_stoppedcpus;    /* # successful stop_cpus. */
+       atomic_long_t expedited_done_tries;     /* # tries to update _done. */
+       atomic_long_t expedited_done_lost;      /* # times beaten to _done. */
+       atomic_long_t expedited_done_exit;      /* # times exited _done loop. */
 
        unsigned long jiffies_force_qs;         /* Time at which to invoke */
                                                /*  force_quiescent_state(). */
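
The first new field, expedited_wrap, counts trips through the guard at
the top of synchronize_sched_expedited() that falls back to
synchronize_sched() when the start ticket runs too far ahead of the
done ticket.  A hypothetical userspace illustration of that wrapping
comparison (ULONG_CMP_GE() re-expressed locally; not kernel code):

	#include <stdio.h>

	/* Local stand-in for the kernel's ULONG_CMP_GE(): true when "a"
	 * is not behind "b" on a counter that is allowed to wrap. */
	#define ULONG_CMP_GE(a, b) ((long)((a) - (b)) >= 0)

	int main(void)
	{
		unsigned long done = -4UL; /* done ticket just below wrap */
		unsigned long snap = 3;    /* start ticket already wrapped */

		/* snap is logically ahead of done despite being
		 * numerically smaller. */
		printf("%d\n", ULONG_CMP_GE(snap, done));              /* 1 */

		/* The wrap guard fires only when start runs ULONG_MAX/8
		 * or more ahead of done; here it is only 7 ahead. */
		printf("%d\n", ULONG_CMP_GE(snap, done + (-1UL) / 8)); /* 0 */
		return 0;
	}

The remaining counters each tag one exit path from the retry loops, so
the debugfs side can show where expedited grace periods spend their
attempts.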