sched: Replace rq->bkl_count with rq->rq_sched_info.bkl_count
author		Yong Zhang <yong.zhang0@gmail.com>
		Fri, 14 Jan 2011 07:57:39 +0000 (15:57 +0800)
committer	Ingo Molnar <mingo@elte.hu>
		Tue, 18 Jan 2011 14:09:43 +0000 (15:09 +0100)
The bkl_count slot in rq->rq_sched_info is currently unused on the rq side, so
fold rq->bkl_count into it. This saves a little space in struct rq.

Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1294991859-13246-1-git-send-email-yong.zhang0@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
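
For context, a trimmed sketch of the two structures involved (field lists cut
down to what matters here, not the full definitions): struct rq already embeds
a struct sched_info as rq_sched_info under CONFIG_SCHEDSTATS, and the
bkl_count slot of that embedded copy was never written on the rq side, which
is why the separate rq->bkl_count counter is redundant.

    /* Trimmed sketch only -- most fields omitted. */
    struct sched_info {
            /* ... run-delay / queueing statistics ... */
            unsigned int bkl_count;   /* used per task; unused per rq until this patch */
    };

    struct rq {
            /* ... */
    #ifdef CONFIG_SCHEDSTATS
            /* latency stats */
            struct sched_info rq_sched_info;
            /* ... other schedstat counters ... */
            /* BKL stats */
            unsigned int bkl_count;   /* dropped below; its role moves into rq_sched_info */
    #endif
            /* ... */
    };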
kernel/sched.c
kernel/sched_debug.c

diff --git a/kernel/sched.c b/kernel/sched.c
index 6cbff6b..0a169a8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -553,9 +553,6 @@ struct rq {
        /* try_to_wake_up() stats */
        unsigned int ttwu_count;
        unsigned int ttwu_local;
-
-       /* BKL stats */
-       unsigned int bkl_count;
 #endif
 };
 
@@ -3887,7 +3884,7 @@ static inline void schedule_debug(struct task_struct *prev)
        schedstat_inc(this_rq(), sched_count);
 #ifdef CONFIG_SCHEDSTATS
        if (unlikely(prev->lock_depth >= 0)) {
-               schedstat_inc(this_rq(), bkl_count);
+               schedstat_inc(this_rq(), rq_sched_info.bkl_count);
                schedstat_inc(prev, sched_info.bkl_count);
        }
 #endif
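
The one-line switch above needs no other change because schedstat_inc() simply
pastes its second argument into a member access, so a nested member such as
rq_sched_info.bkl_count is accepted as-is. Roughly, paraphrasing the
kernel/sched_stats.h helper of this era:

    #ifdef CONFIG_SCHEDSTATS
    # define schedstat_inc(rq, field)      do { (rq)->field++; } while (0)
    #else
    # define schedstat_inc(rq, field)      do { } while (0)
    #endif

So schedstat_inc(this_rq(), rq_sched_info.bkl_count) expands to
this_rq()->rq_sched_info.bkl_count++, i.e. the same counter bump as before,
just stored one level deeper.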
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index e4d3725..eb6cb8e 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -296,9 +296,11 @@ static void print_cpu(struct seq_file *m, int cpu)
        P(ttwu_count);
        P(ttwu_local);
 
-       P(bkl_count);
+       SEQ_printf(m, "  .%-30s: %d\n", "bkl_count",
+                               rq->rq_sched_info.bkl_count);
 
 #undef P
+#undef P64
 #endif
        spin_lock_irqsave(&sched_debug_lock, flags);
        print_cfs_stats(m, cpu);
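
For reference, P() and P64() are local helpers defined earlier in print_cpu()
that stringify the field name and read it straight off rq; roughly along these
lines (an approximation, not the exact source):

    #define P(x) \
            SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x))
    #define P64(n) \
            SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

The open-coded SEQ_printf() keeps the old "bkl_count" label in the
/proc/sched_debug output, whereas P(rq_sched_info.bkl_count) would have
printed the full member path. The added #undef P64 merely pairs the existing
#undef P with the second helper defined in this function.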