author		Mike Travis <travis@sgi.com>	2008-03-26 17:23:48 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-19 13:44:59 -0400
commit		e0982e90cd1ecf59818b137386b7f63debded9cc
tree		3cdbfa8a69dca4a9c9596d61bffa32f1b676f09d
parent		4bdbaad33d0f4d0e9818a38a825f5b75c0296a28
init: move setup of nr_cpu_ids to as early as possible
Move the setting of nr_cpu_ids from sched_init() to start_kernel()
so that it's available as early as possible.
Note that an arch still has the option of setting it even earlier if need be,
but doing so must not produce a value different from what the
setup_nr_cpu_ids() function would compute.
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
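As a purely illustrative sketch (not part of this patch; the helper and variable names below are made up), having nr_cpu_ids valid this early means boot-time code that runs after setup_nr_cpu_ids() can size allocations by the number of possible CPU ids instead of NR_CPUS, for example:

	#include <linux/bootmem.h>
	#include <linux/cpumask.h>
	#include <linux/init.h>

	/* Hypothetical example data, sized by possible CPU ids rather than NR_CPUS */
	static unsigned long *cpu_scratch __initdata;

	/* Hypothetical helper, assumed to run after setup_nr_cpu_ids() in start_kernel() */
	static void __init example_alloc_cpu_scratch(void)
	{
		cpu_scratch = alloc_bootmem(nr_cpu_ids * sizeof(*cpu_scratch));
	}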
-rw-r--r--	init/main.c	| 17
-rw-r--r--	kernel/sched.c	|  7
2 files changed, 17 insertions, 7 deletions
diff --git a/init/main.c b/init/main.c
index 2df3f0617fdc..833a67df1f7e 100644
--- a/init/main.c
+++ b/init/main.c
@@ -359,6 +359,7 @@ static void __init smp_init(void)
 #endif
 
 static inline void setup_per_cpu_areas(void) { }
+static inline void setup_nr_cpu_ids(void) { }
 static inline void smp_prepare_cpus(unsigned int maxcpus) { }
 
 #else
@@ -368,6 +369,21 @@ cpumask_t cpu_mask_all __read_mostly = CPU_MASK_ALL;
 EXPORT_SYMBOL(cpu_mask_all);
 #endif
 
+/* Setup number of possible processor ids */
+int nr_cpu_ids __read_mostly = NR_CPUS;
+EXPORT_SYMBOL(nr_cpu_ids);
+
+/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
+static void __init setup_nr_cpu_ids(void)
+{
+	int cpu, highest_cpu = 0;
+
+	for_each_possible_cpu(cpu)
+		highest_cpu = cpu;
+
+	nr_cpu_ids = highest_cpu + 1;
+}
+
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
 
@@ -542,6 +558,7 @@ asmlinkage void __init start_kernel(void)
 	setup_command_line(command_line);
 	unwind_setup();
 	setup_per_cpu_areas();
+	setup_nr_cpu_ids();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
 
 	/*
diff --git a/kernel/sched.c b/kernel/sched.c
index b56d98b01267..6ab0fcbf26e9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6116,10 +6116,6 @@ void __init migration_init(void)
 
 #ifdef CONFIG_SMP
 
-/* Number of possible processor ids */
-int nr_cpu_ids __read_mostly = NR_CPUS;
-EXPORT_SYMBOL(nr_cpu_ids);
-
 #ifdef CONFIG_SCHED_DEBUG
 
 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
@@ -7478,7 +7474,6 @@ static void init_tg_rt_entry(struct rq *rq, struct task_group *tg,
 
 void __init sched_init(void)
 {
-	int highest_cpu = 0;
 	int i, j;
 	unsigned long alloc_size = 0, ptr;
 
@@ -7569,7 +7564,6 @@ void __init sched_init(void)
 #endif
 		init_rq_hrtick(rq);
 		atomic_set(&rq->nr_iowait, 0);
-		highest_cpu = i;
 	}
 
 	set_load_weight(&init_task);
@@ -7579,7 +7573,6 @@ void __init sched_init(void)
 #endif
 
 #ifdef CONFIG_SMP
-	nr_cpu_ids = highest_cpu + 1;
 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
 #endif
 
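As a closing note on the "an arch may set nr_cpu_ids earlier" comment in the patch: a hedged sketch (not from this commit, and the function name is hypothetical) of what such an arch-side assignment would have to look like; it must derive the same value from the possible-CPU map that setup_nr_cpu_ids() computes:

	#include <linux/cpumask.h>
	#include <linux/init.h>

	/* Hypothetical arch hook: valid only if it yields the same value
	 * that setup_nr_cpu_ids() would compute from the possible-CPU map. */
	static void __init example_arch_set_nr_cpu_ids(void)
	{
		int cpu, highest_cpu = 0;

		for_each_possible_cpu(cpu)
			highest_cpu = cpu;

		nr_cpu_ids = highest_cpu + 1;
	}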