sched: disable forced preemption by default
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	Mon, 15 Oct 2007 15:00:14 +0000 (17:00 +0200)
committer	Ingo Molnar <mingo@elte.hu>	Mon, 15 Oct 2007 15:00:14 +0000 (17:00 +0200)
Implement a feature bit to disable forced (wakeup) preemption. This
makes it possible to check whether a workload is overscheduling or not.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
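
For reference: sysctl_sched_features is declared const_debug, so the mask
is only writable at runtime on a CONFIG_SCHED_DEBUG kernel, where in this
era it is exposed as a plain integer sysctl. A minimal userspace sketch
that clears the WAKEUP_PREEMPT bit — the /proc path below is an assumption
to verify on the target kernel; the bit value 16 comes from the patch:

/* Sketch: clear SCHED_FEAT_WAKEUP_PREEMPT at runtime. Assumes a
 * CONFIG_SCHED_DEBUG kernel exposing the feature mask as an integer
 * at /proc/sys/kernel/sched_features (verify on the target system).
 */
#include <stdio.h>

#define SCHED_FEAT_WAKEUP_PREEMPT 16	/* bit value from this patch */

int main(void)
{
	const char *path = "/proc/sys/kernel/sched_features";
	unsigned int mask;
	FILE *f = fopen(path, "r");

	if (!f || fscanf(f, "%u", &mask) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);

	mask &= ~SCHED_FEAT_WAKEUP_PREEMPT;	/* clear the feature bit */

	f = fopen(path, "w");
	if (!f || fprintf(f, "%u\n", mask) < 0) {
		perror(path);
		return 1;
	}
	fclose(f);
	return 0;
}

Writing the whole mask back, rather than a single bit, mirrors how the
integer sysctl works: there is no per-feature interface at this point.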
kernel/sched.c
kernel/sched_fair.c

diff --git a/kernel/sched.c b/kernel/sched.c
index b7dff36..0bd8f2c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -444,13 +444,15 @@ enum {
        SCHED_FEAT_START_DEBIT          = 2,
        SCHED_FEAT_TREE_AVG             = 4,
        SCHED_FEAT_APPROX_AVG           = 8,
+       SCHED_FEAT_WAKEUP_PREEMPT       = 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
                SCHED_FEAT_NEW_FAIR_SLEEPERS    *1 |
                SCHED_FEAT_START_DEBIT          *1 |
                SCHED_FEAT_TREE_AVG             *0 |
-               SCHED_FEAT_APPROX_AVG           *0;
+               SCHED_FEAT_APPROX_AVG           *0 |
+               SCHED_FEAT_WAKEUP_PREEMPT       *1;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
 
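The *1 / *0 multiplier idiom above builds the default mask at compile
time while keeping every feature's on/off state visible in one place;
sched_feat() then reduces to a single bitwise test. A self-contained
sketch of the same pattern, with invented FEAT_* names purely for
illustration:

/* Standalone sketch of the feature-bit pattern: power-of-two enum
 * values, a default mask built with *1 / *0 multipliers, and a
 * sched_feat()-style test macro. Names are illustrative only.
 */
#include <stdio.h>

enum {
	FEAT_A = 1,
	FEAT_B = 2,
	FEAT_C = 4,
};

static unsigned int features =
		FEAT_A	*1 |
		FEAT_B	*0 |
		FEAT_C	*1;

#define feat(x) (features & FEAT_##x)

int main(void)
{
	printf("A=%d B=%d C=%d\n", !!feat(A), !!feat(B), !!feat(C));
	return 0;
}
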
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3ac096e..3843ec7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -626,7 +626,7 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
         */
        update_curr(cfs_rq);
 
-       if (cfs_rq->nr_running > 1)
+       if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
                check_preempt_tick(cfs_rq, curr);
 }
 
@@ -828,18 +828,20 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
                return;
        }
 
-       while (!is_same_group(se, pse)) {
-               se = parent_entity(se);
-               pse = parent_entity(pse);
-       }
+       if (sched_feat(WAKEUP_PREEMPT)) {
+               while (!is_same_group(se, pse)) {
+                       se = parent_entity(se);
+                       pse = parent_entity(pse);
+               }
 
-       delta = se->vruntime - pse->vruntime;
-       gran = sysctl_sched_wakeup_granularity;
-       if (unlikely(se->load.weight != NICE_0_LOAD))
-               gran = calc_delta_fair(gran, &se->load);
+               delta = se->vruntime - pse->vruntime;
+               gran = sysctl_sched_wakeup_granularity;
+               if (unlikely(se->load.weight != NICE_0_LOAD))
+                       gran = calc_delta_fair(gran, &se->load);
 
-       if (delta > gran)
-               resched_task(curr);
+               if (delta > gran)
+                       resched_task(curr);
+       }
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
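
Taken together, the two hunks gate both preemption paths on the new bit:
with WAKEUP_PREEMPT set, a wakeup forces a reschedule only when the
current task's vruntime leads the woken task's by more than the
(weight-scaled) wakeup granularity; with it cleared, the wakeup path
never reschedules and entity_tick() compensates by running
check_preempt_tick() on every tick, even with a single runnable task.
A toy model of those two decisions (simplified types, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define WAKEUP_PREEMPT 16	/* bit value from the patch */

static unsigned int features = WAKEUP_PREEMPT;	/* enabled by default */

#define feat(x) (features & (x))

/* Wakeup path: preempt only if the feature is on and curr's vruntime
 * lead over the woken task (delta) exceeds the granularity. */
static bool wakeup_preempts(long long delta, long long gran)
{
	if (!feat(WAKEUP_PREEMPT))
		return false;
	return delta > gran;
}

/* Tick path: mirrors nr_running > 1 || !sched_feat(WAKEUP_PREEMPT). */
static bool tick_checks_preempt(unsigned int nr_running)
{
	return nr_running > 1 || !feat(WAKEUP_PREEMPT);
}

int main(void)
{
	printf("wakeup preempts: %d\n", (int)wakeup_preempts(3000000, 1000000));
	features &= ~WAKEUP_PREEMPT;	/* clear the feature bit */
	printf("after disabling: wakeup %d, tick checks with one task %d\n",
	       (int)wakeup_preempts(3000000, 1000000),
	       (int)tick_checks_preempt(1));
	return 0;
}

Flipping the bit in the model mirrors what clearing
SCHED_FEAT_WAKEUP_PREEMPT in sysctl_sched_features does: preemption
decisions move entirely to the tick.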