-rw-r--r--   arch/x86/kernel/process.c    | 17
-rw-r--r--   arch/x86/kernel/process_32.c |  1
-rw-r--r--   arch/x86/kernel/process_64.c |  2
-rw-r--r--   include/asm-x86/acpi.h       |  2
-rw-r--r--   include/asm-x86/cpufeature.h |  1
-rw-r--r--   include/asm-x86/idle.h       |  2
-rw-r--r--   kernel/time/tick-broadcast.c | 12
-rw-r--r--   kernel/time/tick-common.c    | 10
-rw-r--r--   kernel/time/tick-internal.h  |  7
-rw-r--r--   kernel/time/tick-sched.c     | 11

10 files changed, 50 insertions, 15 deletions
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 7fc4d5b0a6a0..876e91890777 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -246,6 +246,14 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
 	return 1;
 }
 
+static cpumask_t c1e_mask = CPU_MASK_NONE;
+static int c1e_detected;
+
+void c1e_remove_cpu(int cpu)
+{
+	cpu_clear(cpu, c1e_mask);
+}
+
 /*
  * C1E aware idle routine. We check for C1E active in the interrupt
  * pending message MSR. If we detect C1E, then we handle it the same
@@ -253,9 +261,6 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
  */
 static void c1e_idle(void)
 {
-	static cpumask_t c1e_mask = CPU_MASK_NONE;
-	static int c1e_detected;
-
 	if (need_resched())
 		return;
 
@@ -265,8 +270,10 @@ static void c1e_idle(void)
 		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
 		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
 			c1e_detected = 1;
-			mark_tsc_unstable("TSC halt in C1E");
-			printk(KERN_INFO "System has C1E enabled\n");
+			if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+				mark_tsc_unstable("TSC halt in AMD C1E");
+			printk(KERN_INFO "System has AMD C1E enabled\n");
+			set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
 		}
 	}
 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 3b7a1ddcc0bc..4b3cfdf54216 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -88,6 +88,7 @@ static void cpu_exit_clear(void)
 	cpu_clear(cpu, cpu_callin_map);
 
 	numa_remove_cpu(cpu);
+	c1e_remove_cpu(cpu);
 }
 
 /* We don't actually take CPU down, just spin without interrupts. */
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 71553b664e2a..e12e0e4dd256 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -93,6 +93,8 @@ DECLARE_PER_CPU(int, cpu_state);
 static inline void play_dead(void)
 {
 	idle_task_exit();
+	c1e_remove_cpu(raw_smp_processor_id());
+
 	mb();
 	/* Ack it */
 	__get_cpu_var(cpu_state) = CPU_DEAD;
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index 635d764dc13e..35d1743b57ac 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -140,6 +140,8 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
 	    boot_cpu_data.x86_model <= 0x05 &&
 	    boot_cpu_data.x86_mask < 0x0A)
 		return 1;
+	else if (boot_cpu_has(X86_FEATURE_AMDC1E))
+		return 1;
 	else
 		return max_cstate;
 }
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 9489283a4bcf..cfcfb0a806ba 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -81,6 +81,7 @@
 #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
 #define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */
 #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86/idle.h b/include/asm-x86/idle.h
index d240e5b30a45..cbb649123612 100644
--- a/include/asm-x86/idle.h
+++ b/include/asm-x86/idle.h
@@ -10,4 +10,6 @@ void idle_notifier_register(struct notifier_block *n);
 void enter_idle(void);
 void exit_idle(void);
 
+void c1e_remove_cpu(int cpu);
+
 #endif
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f1f3eee28113..bd7034542399 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -235,7 +235,7 @@ static void tick_do_broadcast_on_off(void *why)
 	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
 		if (!cpu_isset(cpu, tick_broadcast_mask)) {
 			cpu_set(cpu, tick_broadcast_mask);
-			if (td->mode == TICKDEV_MODE_PERIODIC)
+			if (bc->mode == TICKDEV_MODE_PERIODIC)
 				clockevents_shutdown(dev);
 		}
 		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
@@ -245,7 +245,7 @@ static void tick_do_broadcast_on_off(void *why)
 		if (!tick_broadcast_force &&
 		    cpu_isset(cpu, tick_broadcast_mask)) {
 			cpu_clear(cpu, tick_broadcast_mask);
-			if (td->mode == TICKDEV_MODE_PERIODIC)
+			if (bc->mode == TICKDEV_MODE_PERIODIC)
 				tick_setup_periodic(dev, 0);
 		}
 		break;
@@ -575,4 +575,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
 	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
+/*
+ * Check, whether the broadcast device is in one shot mode
+ */
+int tick_broadcast_oneshot_active(void)
+{
+	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
+}
+
 #endif
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 019315ebf9de..df12434b43ca 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
-int tick_do_timer_cpu __read_mostly = -1;
+int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
 DEFINE_SPINLOCK(tick_device_lock);
 
 /*
@@ -109,7 +109,8 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
 	if (!tick_device_is_functional(dev))
 		return;
 
-	if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
+	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
+	    !tick_broadcast_oneshot_active()) {
 		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
 	} else {
 		unsigned long seq;
@@ -148,7 +149,7 @@ static void tick_setup_device(struct tick_device *td,
 	 * If no cpu took the do_timer update, assign it to
 	 * this cpu:
 	 */
-	if (tick_do_timer_cpu == -1) {
+	if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
 		tick_do_timer_cpu = cpu;
 		tick_next_period = ktime_get();
 		tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
@@ -300,7 +301,8 @@ static void tick_shutdown(unsigned int *cpup)
 	if (*cpup == tick_do_timer_cpu) {
 		int cpu = first_cpu(cpu_online_map);
 
-		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
+		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu :
+			TICK_DO_TIMER_NONE;
 	}
 	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 6e9db9734aa6..469248782c23 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -1,6 +1,10 @@
 /*
  * tick internal variable and functions used by low/high res code
  */
+
+#define TICK_DO_TIMER_NONE	-1
+#define TICK_DO_TIMER_BOOT	-2
+
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 extern spinlock_t tick_device_lock;
 extern ktime_t tick_next_period;
@@ -31,6 +35,7 @@ extern void tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
+extern int tick_broadcast_oneshot_active(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -39,6 +44,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
@@ -68,6 +74,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
 	return 0;
 }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
 #endif /* !TICK_ONESHOT */
 
 /*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a87b0468568b..39019b3f7621 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -75,6 +75,9 @@ static void tick_do_update_jiffies64(ktime_t now)
 							   incr * ticks);
 		}
 		do_timer(++ticks);
+
+		/* Keep the tick_next_period variable up to date */
+		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
 	write_sequnlock(&xtime_lock);
 }
@@ -221,7 +224,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	 */
 	if (unlikely(!cpu_online(cpu))) {
 		if (cpu == tick_do_timer_cpu)
-			tick_do_timer_cpu = -1;
+			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 	}
 
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
@@ -303,7 +306,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 		 * invoked.
 		 */
 		if (cpu == tick_do_timer_cpu)
-			tick_do_timer_cpu = -1;
+			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 
 		ts->idle_sleeps++;
 
@@ -468,7 +471,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	 * this duty, then the jiffies update is still serialized by
 	 * xtime_lock.
 	 */
-	if (unlikely(tick_do_timer_cpu == -1))
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 		tick_do_timer_cpu = cpu;
 
 	/* Check, if the jiffies need an update */
@@ -570,7 +573,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 	 * this duty, then the jiffies update is still serialized by
 	 * xtime_lock.
 	 */
-	if (unlikely(tick_do_timer_cpu == -1))
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 		tick_do_timer_cpu = cpu;
 #endif
 
