author    Borislav Petkov <bp@amd64.org>          2010-08-19 14:10:29 -0400
committer H. Peter Anvin <hpa@linux.intel.com>    2010-08-19 17:47:43 -0400
commit    d7c53c9e822a4fefa13a0cae76f3190bfd0d5c11 (patch)
tree      9f4910ad6e95470bb67f504a09b096a4c2822a8d /arch/x86
parent    8848a91068c018bc91f597038a0f41462a0f88a4 (diff)
x86, hotplug: Serialize CPU hotplug to avoid bringup concurrency issues
When testing cpu hotplug code on 32-bit we kept hitting the
"CPU%d: Stuck ??" message due to multiple cores concurrently accessing
the cpu_callin_mask, among others.

Since these codepaths are not protected from concurrent access due to
the fact that there's no sane reason for making an already complex
code unnecessarily more complex - we hit the issue only when insanely
switching cores off- and online - serialize hotplugging cores on the
sysfs level and be done with it.

[ v2.1: fix !HOTPLUG_CPU build ]

Cc: <stable@kernel.org>
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
LKML-Reference: <20100819181029.GC17171@aftab>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
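Serializing "on the sysfs level" means the generic online/offline store
handler brackets cpu_up()/cpu_down() with the arch lock hooks this patch
provides. Below is a minimal sketch of that caller, loosely modeled on the
sysdev-based drivers/base/cpu.c of this era; the exact function and field
names are illustrative, not copied from the tree:

/* Illustrative sketch, not the verbatim drivers/base/cpu.c of 2.6.36. */
static ssize_t __ref store_online(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
	ssize_t ret;

	cpu_hotplug_driver_lock();		/* arch hook added below */
	switch (buf[0]) {
	case '0':				/* echo 0 > .../online */
		ret = cpu_down(cpu->sysdev.id);
		break;
	case '1':				/* echo 1 > .../online */
		ret = cpu_up(cpu->sysdev.id);
		break;
	default:
		ret = -EINVAL;
	}
	cpu_hotplug_driver_unlock();

	return ret < 0 ? ret : count;
}

With all writes to the online files funneled through one mutex, two cores
can no longer race through the cpu_callin_mask handshake at the same time.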
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig           |  5 +++++
-rw-r--r--  arch/x86/kernel/smpboot.c  | 19 +++++++++++++++++++
2 files changed, 24 insertions(+), 0 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a84fc34c8f77..ac7827fc0823 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -245,6 +245,11 @@ config ARCH_HWEIGHT_CFLAGS
 
 config KTIME_SCALAR
 	def_bool X86_32
+
+config ARCH_CPU_PROBE_RELEASE
+	def_bool y
+	depends on HOTPLUG_CPU
+
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
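The new Kconfig symbol is what flips the generic header from inline no-ops
to the real hooks. A sketch of the corresponding include/linux/cpu.h
pattern follows; this is the usual shape for such config-gated hooks, so
treat the exact lines as an assumption:

/* Assumed shape of the declarations keyed off ARCH_CPU_PROBE_RELEASE. */
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
extern void cpu_hotplug_driver_lock(void);
extern void cpu_hotplug_driver_unlock(void);
#else
static inline void cpu_hotplug_driver_lock(void)
{
}
static inline void cpu_hotplug_driver_unlock(void)
{
}
#endif

Note that "def_bool y" plus "depends on HOTPLUG_CPU" opts x86 in
automatically whenever CPU hotplug is built, which is also why the v2.1
respin had to keep the !HOTPLUG_CPU configuration building.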
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index abf4a86ffc54..8b3bfc4dd708 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -90,6 +90,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
 static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 #define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
 #define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
+
+/*
+ * We need this for trampoline_base protection from concurrent accesses when
+ * off- and onlining cores wildly.
+ */
+static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
+
+void cpu_hotplug_driver_lock()
+{
+	mutex_lock(&x86_cpu_hotplug_driver_mutex);
+}
+
+void cpu_hotplug_driver_unlock()
+{
+	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
+}
+
+ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
+ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
 #else
 static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define get_idle_for_cpu(x)      (idle_thread_array[(x)])
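The arch_cpu_probe()/arch_cpu_release() stubs exist because enabling
ARCH_CPU_PROBE_RELEASE also exposes generic probe/release sysfs attributes
that call straight into the arch; returning -1 (i.e. -EPERM) makes those
writes fail cleanly on x86. A sketch of that call path, again with assumed
signatures from the era's drivers/base/cpu.c:

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
/*
 * Assumed caller shape: the generic sysfs attributes just forward to the
 * arch. On x86 both stubs return -1, so the write errors out with EPERM.
 */
static ssize_t cpu_probe_store(struct sysdev_class *class,
			       struct sysdev_class_attribute *attr,
			       const char *buf, size_t count)
{
	return arch_cpu_probe(buf, count);
}

static ssize_t cpu_release_store(struct sysdev_class *class,
				 struct sysdev_class_attribute *attr,
				 const char *buf, size_t count)
{
	return arch_cpu_release(buf, count);
}
#endif

A mutex rather than a spinlock is the right primitive for the hooks above,
since cpu_up() and cpu_down() can sleep.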