diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2008-06-09 13:15:00 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-07-08 01:47:18 -0400 |
commit | aa276e1cafb3ce9d01d1e837bcd67e92616013ac (patch) | |
tree | d0ecb8fe8ae70fdaed8d97d317199180882671b5 /arch/x86/kernel/process.c | |
parent | 00dba56465228825ea806e3a7fc0aa6bba7bdc6c (diff) |
x86, clockevents: add C1E aware idle function
C1E on AMD machines is like C3 but without control from the OS. Up to
now we disabled the local apic timer for those machines as it stops
when the CPU goes into C1E. This excludes those machines from high
resolution timers / dynamic ticks, which hurts especially X2 based
laptops.
The current boot-time C1E detection has a second, more serious flaw:
some BIOSes do not enable C1E until the ACPI processor module
is loaded. This causes systems to stop working after that point.
To work nicely with C1E enabled machines we use a separate idle
function, which checks on idle entry whether C1E was enabled in the
Interrupt Pending Message MSR. This allows us to do timer broadcasting
for C1E and covers the late enablement of C1E as well.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r-- | arch/x86/kernel/process.c | 66 |
1 file changed, 66 insertions, 0 deletions
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 9fea14607dfe..68ad3539b143 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/sched.h> | 6 | #include <linux/sched.h> |
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/pm.h> | 8 | #include <linux/pm.h> |
9 | #include <linux/clockchips.h> | ||
9 | 10 | ||
10 | struct kmem_cache *task_xstate_cachep; | 11 | struct kmem_cache *task_xstate_cachep; |
11 | 12 | ||
@@ -219,6 +220,68 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) | |||
219 | return (edx & MWAIT_EDX_C1); | 220 | return (edx & MWAIT_EDX_C1); |
220 | } | 221 | } |
221 | 222 | ||
223 | /* | ||
224 | * Check for AMD CPUs, which have potentially C1E support | ||
225 | */ | ||
226 | static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) | ||
227 | { | ||
228 | if (c->x86_vendor != X86_VENDOR_AMD) | ||
229 | return 0; | ||
230 | |||
231 | if (c->x86 < 0x0F) | ||
232 | return 0; | ||
233 | |||
234 | /* Family 0x0f models < rev F do not have C1E */ | ||
235 | if (c->x86 == 0x0f && c->x86_model < 0x40) | ||
236 | return 0; | ||
237 | |||
238 | return 1; | ||
239 | } | ||
240 | |||
241 | /* | ||
242 | * C1E aware idle routine. We check for C1E active in the interrupt | ||
243 | * pending message MSR. If we detect C1E, then we handle it the same | ||
244 | * way as C3 power states (local apic timer and TSC stop) | ||
245 | */ | ||
246 | static void c1e_idle(void) | ||
247 | { | ||
248 | static cpumask_t c1e_mask = CPU_MASK_NONE; | ||
249 | static int c1e_detected; | ||
250 | |||
251 | if (need_resched()) | ||
252 | return; | ||
253 | |||
254 | if (!c1e_detected) { | ||
255 | u32 lo, hi; | ||
256 | |||
257 | rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi); | ||
258 | if (lo & K8_INTP_C1E_ACTIVE_MASK) { | ||
259 | c1e_detected = 1; | ||
260 | mark_tsc_unstable("TSC halt in C1E"); | ||
261 | printk(KERN_INFO "System has C1E enabled\n"); | ||
262 | } | ||
263 | } | ||
264 | |||
265 | if (c1e_detected) { | ||
266 | int cpu = smp_processor_id(); | ||
267 | |||
268 | if (!cpu_isset(cpu, c1e_mask)) { | ||
269 | cpu_set(cpu, c1e_mask); | ||
270 | /* Force broadcast so ACPI can not interfere */ | ||
271 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, | ||
272 | &cpu); | ||
273 | printk(KERN_INFO "Switch to broadcast mode on CPU%d\n", | ||
274 | cpu); | ||
275 | } | ||
276 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); | ||
277 | default_idle(); | ||
278 | local_irq_disable(); | ||
279 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); | ||
280 | local_irq_enable(); | ||
281 | } else | ||
282 | default_idle(); | ||
283 | } | ||
284 | |||
222 | void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) | 285 | void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) |
223 | { | 286 | { |
224 | #ifdef CONFIG_X86_SMP | 287 | #ifdef CONFIG_X86_SMP |
@@ -236,6 +299,9 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) | |||
236 | */ | 299 | */ |
237 | printk(KERN_INFO "using mwait in idle threads.\n"); | 300 | printk(KERN_INFO "using mwait in idle threads.\n"); |
238 | pm_idle = mwait_idle; | 301 | pm_idle = mwait_idle; |
302 | } else if (check_c1e_idle(c)) { | ||
303 | printk(KERN_INFO "using C1E aware idle routine\n"); | ||
304 | pm_idle = c1e_idle; | ||
239 | } else | 305 | } else |
240 | pm_idle = default_idle; | 306 | pm_idle = default_idle; |
241 | } | 307 | } |