sched: reduce stack size requirements in kernel/sched.c
author Mike Travis <travis@sgi.com>
Mon, 24 Nov 2008 16:05:02 +0000 (02:35 +1030)
committer Ingo Molnar <mingo@elte.hu>
Mon, 24 Nov 2008 16:49:27 +0000 (17:49 +0100)
Impact: cleanup

  * use node_to_cpumask_ptr in place of node_to_cpumask to reduce stack
    requirements in sched.c: node_to_cpumask() returns a full cpumask_t
    by value, which costs NR_CPUS bits of stack in each caller, while
    node_to_cpumask_ptr() can resolve to a pointer to a pre-existing
    per-node mask (see the sketch below)
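
A minimal sketch of the two calling patterns, assuming the circa-2.6.28
topology interfaces; the helper names old_style/new_style are
illustrative, not taken from this patch, and the macro behaviour is
paraphrased (the generic fallback of node_to_cpumask_ptr still makes
one local copy, but NUMA architectures can point it at a pre-allocated
per-node mask):

	/* Old pattern: node_to_cpumask() returns the mask by value, so
	 * each caller keeps a whole cpumask_t (512 bytes with
	 * NR_CPUS=4096) on its own stack. */
	static int old_style(int cpu, const cpumask_t *cpu_map)
	{
		cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu));

		cpus_and(nodemask, nodemask, *cpu_map);
		return first_cpu(nodemask);
	}

	/* New pattern: node_to_cpumask_ptr() declares pnodemask as a
	 * const cpumask_t *; the scratch mask is passed in by the
	 * caller, so this frame only needs room for a pointer. */
	static int new_style(int cpu, const cpumask_t *cpu_map,
			     cpumask_t *tmpmask)
	{
		node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu));

		cpus_and(*tmpmask, *pnodemask, *cpu_map);
		return first_cpu(*tmpmask);
	}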

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c

diff --git a/kernel/sched.c b/kernel/sched.c
index bb82765..dd22cec 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6110,8 +6110,9 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 
        do {
                /* On same node? */
-               mask = node_to_cpumask(cpu_to_node(dead_cpu));
-               cpus_and(mask, mask, p->cpus_allowed);
+               node_to_cpumask_ptr(pnodemask, cpu_to_node(dead_cpu));
+
+               cpus_and(mask, *pnodemask, p->cpus_allowed);
                dest_cpu = any_online_cpu(mask);
 
                /* On any allowed CPU? */
@@ -7098,9 +7099,9 @@ static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
                                 struct sched_group **sg, cpumask_t *nodemask)
 {
        int group;
+       node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu));
 
-       *nodemask = node_to_cpumask(cpu_to_node(cpu));
-       cpus_and(*nodemask, *nodemask, *cpu_map);
+       cpus_and(*nodemask, *pnodemask, *cpu_map);
        group = first_cpu(*nodemask);
 
        if (sg)
@@ -7150,9 +7151,9 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 
                for (i = 0; i < nr_node_ids; i++) {
                        struct sched_group *oldsg, *sg = sched_group_nodes[i];
+                       node_to_cpumask_ptr(pnodemask, i);
 
-                       *nodemask = node_to_cpumask(i);
-                       cpus_and(*nodemask, *nodemask, *cpu_map);
+                       cpus_and(*nodemask, *pnodemask, *cpu_map);
                        if (cpus_empty(*nodemask))
                                continue;