aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@engr.sgi.com>2007-05-06 17:48:58 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-07 15:12:51 -0400
commit476f35348eb8d2a827765992899fea78b7dcc46f (patch)
tree81dbace9de3d4ffa3ecc67bffe265134962117bd
parentaee16b3cee2746880e40945a9b5bff4f309cfbc4 (diff)
Safer nr_cpu_ids and nr_node_ids determination and initial values
The nr_cpu_ids value is currently only calculated in smp_init. However, it may be needed before (SLUB needs it in kmem_cache_init!) and other kernel components may also want to allocate dynamically sized per cpu arrays before smp_init. So move the determination of possible cpus into sched_init() where we already loop over all possible cpus early in boot. Also initialize both nr_node_ids and nr_cpu_ids with the highest value they could take. If we have accidental users before these values are determined then the current value of 0 may cause too small per cpu and per node arrays to be allocated. If it is set to the maximum possible then we only waste some memory for early boot users. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--init/main.c5
-rw-r--r--kernel/sched.c8
-rw-r--r--lib/cpumask.c3
-rw-r--r--mm/page_alloc.c2
4 files changed, 9 insertions, 9 deletions
diff --git a/init/main.c b/init/main.c
index df982ff5d2b0..0e22f40487bb 100644
--- a/init/main.c
+++ b/init/main.c
@@ -384,11 +384,6 @@ static void __init setup_per_cpu_areas(void)
384static void __init smp_init(void) 384static void __init smp_init(void)
385{ 385{
386 unsigned int cpu; 386 unsigned int cpu;
387 unsigned highest = 0;
388
389 for_each_cpu_mask(cpu, cpu_possible_map)
390 highest = cpu;
391 nr_cpu_ids = highest + 1;
392 387
393 /* FIXME: This should be done in userspace --RR */ 388 /* FIXME: This should be done in userspace --RR */
394 for_each_present_cpu(cpu) { 389 for_each_present_cpu(cpu) {
diff --git a/kernel/sched.c b/kernel/sched.c
index 960d7c5fca39..0227f1625a75 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5244,6 +5244,11 @@ int __init migration_init(void)
5244#endif 5244#endif
5245 5245
5246#ifdef CONFIG_SMP 5246#ifdef CONFIG_SMP
5247
5248/* Number of possible processor ids */
5249int nr_cpu_ids __read_mostly = NR_CPUS;
5250EXPORT_SYMBOL(nr_cpu_ids);
5251
5247#undef SCHED_DOMAIN_DEBUG 5252#undef SCHED_DOMAIN_DEBUG
5248#ifdef SCHED_DOMAIN_DEBUG 5253#ifdef SCHED_DOMAIN_DEBUG
5249static void sched_domain_debug(struct sched_domain *sd, int cpu) 5254static void sched_domain_debug(struct sched_domain *sd, int cpu)
@@ -6726,6 +6731,7 @@ int in_sched_functions(unsigned long addr)
6726void __init sched_init(void) 6731void __init sched_init(void)
6727{ 6732{
6728 int i, j, k; 6733 int i, j, k;
6734 int highest_cpu = 0;
6729 6735
6730 for_each_possible_cpu(i) { 6736 for_each_possible_cpu(i) {
6731 struct prio_array *array; 6737 struct prio_array *array;
@@ -6760,11 +6766,13 @@ void __init sched_init(void)
6760 // delimiter for bitsearch 6766 // delimiter for bitsearch
6761 __set_bit(MAX_PRIO, array->bitmap); 6767 __set_bit(MAX_PRIO, array->bitmap);
6762 } 6768 }
6769 highest_cpu = i;
6763 } 6770 }
6764 6771
6765 set_load_weight(&init_task); 6772 set_load_weight(&init_task);
6766 6773
6767#ifdef CONFIG_SMP 6774#ifdef CONFIG_SMP
6775 nr_cpu_ids = highest_cpu + 1;
6768 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL); 6776 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
6769#endif 6777#endif
6770 6778
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 1ea2c184315d..bb4f76d3c3e7 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -15,9 +15,6 @@ int __next_cpu(int n, const cpumask_t *srcp)
15} 15}
16EXPORT_SYMBOL(__next_cpu); 16EXPORT_SYMBOL(__next_cpu);
17 17
18int nr_cpu_ids;
19EXPORT_SYMBOL(nr_cpu_ids);
20
21int __any_online_cpu(const cpumask_t *mask) 18int __any_online_cpu(const cpumask_t *mask)
22{ 19{
23 int cpu; 20 int cpu;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 353ce9039a86..019ceda6a8b6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -665,7 +665,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
665} 665}
666 666
667#if MAX_NUMNODES > 1 667#if MAX_NUMNODES > 1
668int nr_node_ids __read_mostly; 668int nr_node_ids __read_mostly = MAX_NUMNODES;
669EXPORT_SYMBOL(nr_node_ids); 669EXPORT_SYMBOL(nr_node_ids);
670 670
671/* 671/*