 arch/x86/kernel/apic_32.c    |  5
 arch/x86/kernel/apic_64.c    | 26
 arch/x86/kernel/cpu/amd.c    | 30
 arch/x86/kernel/cpu/amd_64.c | 25
 arch/x86/kernel/process.c    | 66
 include/asm-x86/apic.h       |  3
 kernel/time/tick-broadcast.c |  6
 7 files changed, 73 insertions(+), 88 deletions(-)
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 4b99b1bdeb6c..c44206e731d4 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -64,9 +64,8 @@ static int enable_local_apic __initdata;
 
 /* Local APIC timer verification ok */
 static int local_apic_timer_verify_ok;
-/* Disable local APIC timer from the kernel commandline or via dmi quirk
-   or using CPU MSR check */
-int local_apic_timer_disabled;
+/* Disable local APIC timer from the kernel commandline or via dmi quirk */
+static int local_apic_timer_disabled;
 /* Local APIC timer works in C2 */
 int local_apic_timer_c2_ok;
 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 0633cfd0dc29..a5cc8447cf4d 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -43,7 +43,7 @@
 #include <mach_ipi.h>
 #include <mach_apic.h>
 
-int disable_apic_timer __cpuinitdata;
+static int disable_apic_timer __cpuinitdata;
 static int apic_calibrate_pmtmr __initdata;
 int disable_apic;
 
@@ -422,32 +422,8 @@ void __init setup_boot_APIC_clock(void)
 	setup_APIC_timer();
 }
 
-/*
- * AMD C1E enabled CPUs have a real nasty problem: Some BIOSes set the
- * C1E flag only in the secondary CPU, so when we detect the wreckage
- * we already have enabled the boot CPU local apic timer. Check, if
- * disable_apic_timer is set and the DUMMY flag is cleared. If yes,
- * set the DUMMY flag again and force the broadcast mode in the
- * clockevents layer.
- */
-static void __cpuinit check_boot_apic_timer_broadcast(void)
-{
-	if (!disable_apic_timer ||
-	    (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
-		return;
-
-	printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n");
-	lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
-
-	local_irq_enable();
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
-			   &boot_cpu_physical_apicid);
-	local_irq_disable();
-}
-
 void __cpuinit setup_secondary_APIC_clock(void)
 {
-	check_boot_apic_timer_broadcast();
 	setup_APIC_timer();
 }
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index e76b49e7a916..acc891ae5901 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -24,31 +24,6 @@
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
-#ifdef CONFIG_X86_LOCAL_APIC
-
-/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
-static __cpuinit int amd_apic_timer_broken(struct cpuinfo_x86 *c)
-{
-	u32 lo, hi;
-
-	if (c->x86 < 0x0F)
-		return 0;
-
-	/* Family 0x0f models < rev F do not have this MSR */
-	if (c->x86 == 0x0f && c->x86_model < 0x40)
-		return 0;
-
-	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
-	if (lo & K8_INTP_C1E_ACTIVE_MASK) {
-		if (smp_processor_id() != boot_cpu_physical_apicid)
-			printk(KERN_INFO "AMD C1E detected late. "
-			       "Force timer broadcast.\n");
-		return 1;
-	}
-	return 0;
-}
-#endif
-
 int force_mwait __cpuinitdata;
 
 static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
@@ -285,11 +260,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		num_cache_leaves = 3;
 	}
 
-#ifdef CONFIG_X86_LOCAL_APIC
-	if (amd_apic_timer_broken(c))
-		local_apic_timer_disabled = 1;
-#endif
-
 	/* K6s reports MCEs but don't actually have all the MSRs */
 	if (c->x86 < 6)
 		clear_cpu_cap(c, X86_FEATURE_MCE);
diff --git a/arch/x86/kernel/cpu/amd_64.c b/arch/x86/kernel/cpu/amd_64.c
index f5fc161d8f2a..f8d20588bde9 100644
--- a/arch/x86/kernel/cpu/amd_64.c
+++ b/arch/x86/kernel/cpu/amd_64.c
@@ -110,28 +110,6 @@ static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
 #endif
 }
 
-/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
-static __cpuinit int amd_apic_timer_broken(struct cpuinfo_x86 *c)
-{
-	u32 lo, hi;
-
-	if (c->x86 < 0x0F)
-		return 0;
-
-	/* Family 0x0f models < rev F do not have this MSR */
-	if (c->x86 == 0x0f && c->x86_model < 0x40)
-		return 0;
-
-	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
-	if (lo & K8_INTP_C1E_ACTIVE_MASK) {
-		if (smp_processor_id() != boot_cpu_physical_apicid)
-			printk(KERN_INFO "AMD C1E detected late. "
-			       "Force timer broadcast.\n");
-		return 1;
-	}
-	return 0;
-}
-
 void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
 	early_init_amd_mc(c);
@@ -212,9 +190,6 @@ void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	if (c->x86 == 0x10)
 		amd_enable_pci_ext_cfg(c);
 
-	if (amd_apic_timer_broken(c))
-		disable_apic_timer = 1;
-
 	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
 		unsigned long long tseg;
 
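Both removed helpers gate the MSR read on CPU family and model, and the new check_c1e_idle() in the process.c hunk below keeps the same gate: the interrupt pending message MSR only exists on family 0x0f parts from rev F (model 0x40) onward and on newer families. A minimal userspace sketch of that gate, assuming GCC/clang's <cpuid.h>; the extended family/model arithmetic mirrors what the kernel stores in c->x86 and c->x86_model:

/*
 * Userspace sketch (not kernel code) of the family/model gate used by
 * amd_apic_timer_broken()/check_c1e_idle(). Build with gcc -o c1e_gate.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, family, model;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	family = (eax >> 8) & 0xf;
	model = (eax >> 4) & 0xf;
	if (family == 0xf) {
		/* extended family/model fields apply to family 0x0f and up */
		family += (eax >> 20) & 0xff;
		model += ((eax >> 16) & 0xf) << 4;
	}

	if (family < 0x0f || (family == 0x0f && model < 0x40))
		printf("family %#x model %#x: pre-C1E part, MSR check not applicable\n",
		       family, model);
	else
		printf("family %#x model %#x: C1E-capable, MSR check applies\n",
		       family, model);
	return 0;
}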
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9fea14607dfe..68ad3539b143 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -6,6 +6,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/pm.h>
+#include <linux/clockchips.h>
 
 struct kmem_cache *task_xstate_cachep;
 
@@ -219,6 +220,68 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 	return (edx & MWAIT_EDX_C1);
 }
 
+/*
+ * Check for AMD CPUs, which have potentially C1E support
+ */
+static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
+{
+	if (c->x86_vendor != X86_VENDOR_AMD)
+		return 0;
+
+	if (c->x86 < 0x0F)
+		return 0;
+
+	/* Family 0x0f models < rev F do not have C1E */
+	if (c->x86 == 0x0f && c->x86_model < 0x40)
+		return 0;
+
+	return 1;
+}
+
+/*
+ * C1E aware idle routine. We check for C1E active in the interrupt
+ * pending message MSR. If we detect C1E, then we handle it the same
+ * way as C3 power states (local apic timer and TSC stop)
+ */
+static void c1e_idle(void)
+{
+	static cpumask_t c1e_mask = CPU_MASK_NONE;
+	static int c1e_detected;
+
+	if (need_resched())
+		return;
+
+	if (!c1e_detected) {
+		u32 lo, hi;
+
+		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
+		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
+			c1e_detected = 1;
+			mark_tsc_unstable("TSC halt in C1E");
+			printk(KERN_INFO "System has C1E enabled\n");
+		}
+	}
+
+	if (c1e_detected) {
+		int cpu = smp_processor_id();
+
+		if (!cpu_isset(cpu, c1e_mask)) {
+			cpu_set(cpu, c1e_mask);
+			/* Force broadcast so ACPI can not interfere */
+			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
+					   &cpu);
+			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
+			       cpu);
+		}
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+		default_idle();
+		local_irq_disable();
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+		local_irq_enable();
+	} else
+		default_idle();
+}
+
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_SMP
@@ -236,6 +299,9 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 		 */
 		printk(KERN_INFO "using mwait in idle threads.\n");
 		pm_idle = mwait_idle;
+	} else if (check_c1e_idle(c)) {
+		printk(KERN_INFO "using C1E aware idle routine\n");
+		pm_idle = c1e_idle;
 	} else
 		pm_idle = default_idle;
 }
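The detection that c1e_idle() performs with rdmsr() can also be reproduced from userspace through the msr driver, which is handy for checking whether a given machine actually runs with C1E active. A rough sketch, assuming root, a loaded msr module, and that 0xc0010055 and 0x18000000 are the values behind MSR_K8_INT_PENDING_MSG and K8_INTP_C1E_ACTIVE_MASK in your tree's msr-index.h:

/*
 * Userspace sketch: read the interrupt pending message MSR of CPU 0 via
 * /dev/cpu/0/msr and test the C1E active bits, the same check c1e_idle()
 * performs in the kernel. The numeric MSR and mask values below are
 * assumptions; verify them against msr-index.h for your tree.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define INT_PENDING_MSG_MSR	0xc0010055	/* assumed MSR_K8_INT_PENDING_MSG */
#define C1E_ACTIVE_MASK		0x18000000	/* assumed K8_INTP_C1E_ACTIVE_MASK */

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* the msr driver maps the file offset to the MSR number */
	if (pread(fd, &val, sizeof(val), INT_PENDING_MSG_MSR) != sizeof(val)) {
		perror("read MSR");
		close(fd);
		return 1;
	}
	close(fd);

	printf("INT_PENDING_MSG = %#llx, C1E active: %s\n",
	       (unsigned long long)val,
	       (val & C1E_ACTIVE_MASK) ? "yes" : "no");
	return 0;
}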
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index be9639a9a186..3c387cda95fa 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -38,12 +38,9 @@ extern void generic_apic_probe(void);
 extern int apic_verbosity;
 extern int timer_over_8254;
 extern int local_apic_timer_c2_ok;
-extern int local_apic_timer_disabled;
 
-extern int apic_runs_main_timer;
 extern int ioapic_force;
 extern int disable_apic;
-extern int disable_apic_timer;
 
 /*
  * Basic functions accessing APICs.
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 57a1f02e5ec0..67f80c261709 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -30,6 +30,7 @@
 struct tick_device tick_broadcast_device;
 static cpumask_t tick_broadcast_mask;
 static DEFINE_SPINLOCK(tick_broadcast_lock);
+static int tick_broadcast_force;
 
 #ifdef CONFIG_TICK_ONESHOT
 static void tick_broadcast_clear_oneshot(int cpu);
@@ -232,10 +233,11 @@ static void tick_do_broadcast_on_off(void *why)
 						     CLOCK_EVT_MODE_SHUTDOWN);
 		}
 		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
-			dev->features |= CLOCK_EVT_FEAT_DUMMY;
+			tick_broadcast_force = 1;
 		break;
 	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
-		if (cpu_isset(cpu, tick_broadcast_mask)) {
+		if (!tick_broadcast_force &&
+		    cpu_isset(cpu, tick_broadcast_mask)) {
 			cpu_clear(cpu, tick_broadcast_mask);
 			if (td->mode == TICKDEV_MODE_PERIODIC)
 				tick_setup_periodic(dev, 0);
