author | Zwane Mwaikambo <zwane@arm.linux.org.uk> | 2005-09-03 18:56:51 -0400
---|---|---
committer | Linus Torvalds <torvalds@evo.osdl.org> | 2005-09-05 03:06:13 -0400
commit | 4ad8d38342430f8b52f7a8458dce90caf8c8ca64 (patch) |
tree | 090c471fdb44d8fe88c52e95be0e8e43e31fcd5a /arch/i386/mach-voyager/voyager_smp.c |
parent | d7271b14b2e9e5905aba0fbf5c4dc4f8980c0cb2 (diff) |
[PATCH] i386 boottime for_each_cpu broken
for_each_cpu walks through all processors in cpu_possible_map, which is
defined as cpu_callout_map on i386 and isn't initialised until all
processors have been booted. This breaks things which do for_each_cpu
iterations early during boot. So, define cpu_possible_map as a bitmap with
NR_CPUS bits populated. This was triggered by a patch I'm working on which
does alloc_percpu before bringing up secondary processors.
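For illustration, here is a minimal userspace sketch of the failure mode described above. The bitmap type, the cpu_set()/cpu_isset() helpers and the for_each_cpu() macro below are simplified stand-ins for the kernel's cpumask API, not the real implementations: if cpu_possible_map is still empty this early in boot, a for_each_cpu loop simply runs zero iterations and early per-cpu setup silently does nothing.

```c
#include <stdio.h>

#define NR_CPUS 8

/* simplified stand-in for the kernel's cpumask_t */
typedef struct { unsigned long bits; } cpumask_t;

#define CPU_MASK_NONE ((cpumask_t){ 0UL })

/* simplified stand-ins for the kernel's cpu_set()/cpu_isset() */
static void cpu_set(int cpu, cpumask_t *mask)          { mask->bits |= 1UL << cpu; }
static int  cpu_isset(int cpu, const cpumask_t *mask)  { return (int)((mask->bits >> cpu) & 1UL); }

/* simplified for_each_cpu(): visit every CPU whose bit is set in the mask */
#define for_each_cpu(cpu, mask) \
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
		if (cpu_isset((cpu), (mask)))

int main(void)
{
	/* Before the fix: cpu_possible_map aliased cpu_callout_map, which is
	 * still empty this early in boot, so the loop body never executes. */
	cpumask_t cpu_possible_map = CPU_MASK_NONE;
	int cpu, visited = 0;

	for_each_cpu(cpu, &cpu_possible_map)
		visited++;
	printf("empty map: visited %d cpus\n", visited);      /* prints 0 */

	/* After the fix the map is populated early; mark the boot CPU the way
	 * smp_prepare_boot_cpu() now does (the patch also initialises the
	 * Voyager map to CPU_MASK_ALL). */
	cpu_set(0, &cpu_possible_map);
	visited = 0;
	for_each_cpu(cpu, &cpu_possible_map)
		visited++;
	printf("populated map: visited %d cpus\n", visited);  /* prints 1 */

	return 0;
}
```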
From: Alexander Nyberg <alexn@telia.com>
i386-boottime-for_each_cpu-broken.patch
i386-boottime-for_each_cpu-broken-fix.patch
The SMP version of __alloc_percpu checks cpu_possible_map before
allocating memory for a given cpu. With the above patches, the BSP cpuid
is never set in cpu_possible_map, which breaks CONFIG_SMP on uniprocessor
machines (as soon as someone tries to dereference something allocated via
__alloc_percpu, which in fact is never allocated, since the cpu is not set
in cpu_possible_map).
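To make that failure concrete, here is a rough, self-contained sketch of the allocation pattern described above. This is an assumed simplification for illustration, not the actual 2.6-era __alloc_percpu code; alloc_percpu_sim and the cpumask stand-ins are hypothetical. Slots are only allocated for CPUs present in cpu_possible_map, so an unset boot-CPU bit leaves that CPU's slot NULL and the first per-cpu dereference on it faults.

```c
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8

/* simplified stand-ins for the kernel's cpumask_t / cpu_isset() */
typedef unsigned long cpumask_t;
#define cpu_isset(cpu, mask) (((mask) >> (cpu)) & 1UL)

static cpumask_t cpu_possible_map;   /* empty: the BSP bit was never set */

/* per-CPU pointer array, one slot per possible CPU */
struct percpu_data {
	void *ptrs[NR_CPUS];
};

static struct percpu_data *alloc_percpu_sim(size_t size)   /* hypothetical name */
{
	struct percpu_data *pdata = calloc(1, sizeof(*pdata));
	int cpu;

	if (!pdata)
		return NULL;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_isset(cpu, cpu_possible_map))
			continue;            /* CPU not "possible": nothing allocated */
		pdata->ptrs[cpu] = calloc(1, size);
	}
	return pdata;
}

int main(void)
{
	struct percpu_data *pd = alloc_percpu_sim(sizeof(long));
	int bsp = 0;                         /* boot processor id */

	if (!pd)
		return 1;

	/* With the BSP missing from cpu_possible_map its slot stays NULL; in
	 * the kernel the equivalent per-cpu dereference is what oopsed. */
	printf("per-cpu pointer for cpu %d: %p\n", bsp, pd->ptrs[bsp]);
	return 0;
}
```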
Signed-off-by: Zwane Mwaikambo <zwane@arm.linux.org.uk>
Signed-off-by: Alexander Nyberg <alexn@telia.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/i386/mach-voyager/voyager_smp.c')
-rw-r--r-- | arch/i386/mach-voyager/voyager_smp.c | 3
1 file changed, 3 insertions, 0 deletions
```diff
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 16790b798613..46b0cf4a31e0 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -242,6 +242,8 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 cpumask_t cpu_callin_map = CPU_MASK_NONE;
 cpumask_t cpu_callout_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_callout_map);
+cpumask_t cpu_possible_map = CPU_MASK_ALL;
+EXPORT_SYMBOL(cpu_possible_map);
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -1910,6 +1912,7 @@ void __devinit smp_prepare_boot_cpu(void)
 {
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), cpu_callout_map);
+	cpu_set(smp_processor_id(), cpu_possible_map);
 }
 
 int __devinit
```