Merge branch 'sched/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip...
[pandora-kernel.git] / kernel / sched.c
diff --git a/kernel/sched.c b/kernel/sched.c
index d16c8d9..591d5e7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6774,9 +6774,9 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 
 	min_val = INT_MAX;
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Start at @node */
-		n = (node + i) % MAX_NUMNODES;
+		n = (node + i) % nr_node_ids;
 
 		if (!nr_cpus_node(n))
 			continue;
@@ -6970,7 +6970,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 		if (!sched_group_nodes)
 			continue;
 
-		for (i = 0; i < MAX_NUMNODES; i++) {
+		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];
 
 			*nodemask = node_to_cpumask(i);
@@ -7163,7 +7163,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
+	sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
 				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -7302,7 +7302,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Set up physical groups */
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7326,7 +7326,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 					send_covered, tmpmask);
 	}
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7365,9 +7365,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;
 
-		for (j = 0; j < MAX_NUMNODES; j++) {
+		for (j = 0; j < nr_node_ids; j++) {
 			SCHED_CPUMASK_VAR(notcovered, allmasks);
-			int n = (i + j) % MAX_NUMNODES;
+			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(*notcovered, *covered);
@@ -7420,7 +7420,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	}
 
 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++)
+	for (i = 0; i < nr_node_ids; i++)
 		init_numa_sched_groups_power(sched_group_nodes[i]);
 
 	if (sd_allnodes) {