author    Thomas Gleixner <tglx@linutronix.de>    2008-09-22 12:54:29 -0400
committer Thomas Gleixner <tglx@linutronix.de>    2008-09-23 05:38:52 -0400
commit    4faac97d44ac27bdbb010a9c3597401a8f89341f
tree      680f128d2bf1cec0fdc77f7a8cb3f3f54264d282 /arch/x86
parent    6441402b1f173fa38e561d3cee7c01c32e5281ad
x86: prevent stale state of c1e_mask across CPU offline/online
Impact: hang which happens across CPU offline/online on AMD C1E systems

When a CPU goes offline, the corresponding bit in the broadcast mask is
cleared. For AMD C1E enabled CPUs we do not reenable the broadcast when the
CPU comes online again, because we do not clear the corresponding bit in
c1e_mask, which keeps track of which CPUs have already been switched to
broadcast. So on those !$@#& machines we never switch back to broadcasting
after a CPU offline/online cycle.

Clear the bit when the CPU plays dead.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
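The failure mode is easiest to see in isolation. The sketch below is not the
kernel code: the plain bitmask stands in for the c1e_mask cpumask, and
enable_timer_broadcast() is a hypothetical stub for the clockevents broadcast
switch. It only illustrates why a bit left set across an offline/online cycle
means the broadcast switch is never repeated, and how clearing the bit in the
offline path restores it:

	#include <stdio.h>

	/* Tracks which CPUs have already been switched to broadcast mode.
	 * In the patch this is the file-scope c1e_mask cpumask; here a
	 * plain bitmask stands in for it. */
	static unsigned long c1e_mask;

	/* Hypothetical stand-in for the clockevents broadcast switch. */
	static void enable_timer_broadcast(int cpu)
	{
		printf("CPU%d: local timer -> broadcast mode\n", cpu);
	}

	/* Idle path: switch each CPU to broadcast once, keyed off the mask. */
	static void c1e_idle(int cpu)
	{
		if (!(c1e_mask & (1UL << cpu))) {
			c1e_mask |= 1UL << cpu;
			enable_timer_broadcast(cpu);
		}
		/* ... halt until the broadcast tick wakes us ... */
	}

	/* The fix: forget the CPU when it goes offline, so a later pass
	 * through c1e_idle() re-enables broadcast after onlining. */
	static void c1e_remove_cpu(int cpu)
	{
		c1e_mask &= ~(1UL << cpu);
	}

	int main(void)
	{
		c1e_idle(1);       /* first idle: CPU1 switches to broadcast */
		c1e_remove_cpu(1); /* CPU1 goes offline (play_dead path)      */
		c1e_idle(1);       /* CPU1 online again: broadcast re-enabled;
		                    * without the fix the stale bit skips it  */
		return 0;
	}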
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/kernel/process.c      11
-rw-r--r--   arch/x86/kernel/process_32.c    1
-rw-r--r--   arch/x86/kernel/process_64.c    2
3 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 7fc4d5b0a6a0..2e2247117f6e 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -246,6 +246,14 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
 	return 1;
 }
 
+static cpumask_t c1e_mask = CPU_MASK_NONE;
+static int c1e_detected;
+
+void c1e_remove_cpu(int cpu)
+{
+	cpu_clear(cpu, c1e_mask);
+}
+
 /*
  * C1E aware idle routine. We check for C1E active in the interrupt
  * pending message MSR. If we detect C1E, then we handle it the same
@@ -253,9 +261,6 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
  */
 static void c1e_idle(void)
 {
-	static cpumask_t c1e_mask = CPU_MASK_NONE;
-	static int c1e_detected;
-
 	if (need_resched())
 		return;
 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 3b7a1ddcc0bc..4b3cfdf54216 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -88,6 +88,7 @@ static void cpu_exit_clear(void)
 	cpu_clear(cpu, cpu_callin_map);
 
 	numa_remove_cpu(cpu);
+	c1e_remove_cpu(cpu);
 }
 
 /* We don't actually take CPU down, just spin without interrupts. */
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 71553b664e2a..e12e0e4dd256 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -93,6 +93,8 @@ DECLARE_PER_CPU(int, cpu_state);
 static inline void play_dead(void)
 {
 	idle_task_exit();
+	c1e_remove_cpu(raw_smp_processor_id());
+
 	mb();
 	/* Ack it */
 	__get_cpu_var(cpu_state) = CPU_DEAD;