diff options
author | Andi Kleen <andi@firstfloor.org> | 2009-04-09 06:28:22 -0400 |
---|---|---|
committer | H. Peter Anvin <hpa@linux.intel.com> | 2009-04-22 16:54:37 -0400 |
commit | 6298c512bc1007c3ff5c9ce20e6996781651cc45 (patch) | |
tree | 36e3c514d28f8d49410ccd0b683239640dd5b3ff /arch/x86 | |
parent | a939b96cccdb65df80a52447ec8e4a6d79c56dbb (diff) |
x86, mce: make polling timer interval per CPU
The polling timer, while running per CPU, still uses a global next_interval
variable, which led to some CPUs either polling too fast or too slow.
This was not a serious problem because all errors get picked up eventually,
but it's still better to avoid it. Turn next_interval into a per-CPU variable.
v2: Fix check_interval == 0 case (Hidetoshi Seto)
[ Impact: minor bug fix ]
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_64.c | 24 |
1 files changed, 12 insertions, 12 deletions
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index 863f89568b1a..82614f1b923a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c | |||
@@ -452,13 +452,14 @@ void mce_log_therm_throt_event(__u64 status) | |||
452 | */ | 452 | */ |
453 | 453 | ||
454 | static int check_interval = 5 * 60; /* 5 minutes */ | 454 | static int check_interval = 5 * 60; /* 5 minutes */ |
455 | static int next_interval; /* in jiffies */ | 455 | static DEFINE_PER_CPU(int, next_interval); /* in jiffies */ |
456 | static void mcheck_timer(unsigned long); | 456 | static void mcheck_timer(unsigned long); |
457 | static DEFINE_PER_CPU(struct timer_list, mce_timer); | 457 | static DEFINE_PER_CPU(struct timer_list, mce_timer); |
458 | 458 | ||
459 | static void mcheck_timer(unsigned long data) | 459 | static void mcheck_timer(unsigned long data) |
460 | { | 460 | { |
461 | struct timer_list *t = &per_cpu(mce_timer, data); | 461 | struct timer_list *t = &per_cpu(mce_timer, data); |
462 | int *n; | ||
462 | 463 | ||
463 | WARN_ON(smp_processor_id() != data); | 464 | WARN_ON(smp_processor_id() != data); |
464 | 465 | ||
@@ -470,14 +471,14 @@ static void mcheck_timer(unsigned long data) | |||
470 | * Alert userspace if needed. If we logged an MCE, reduce the | 471 | * Alert userspace if needed. If we logged an MCE, reduce the |
471 | * polling interval, otherwise increase the polling interval. | 472 | * polling interval, otherwise increase the polling interval. |
472 | */ | 473 | */ |
474 | n = &__get_cpu_var(next_interval); | ||
473 | if (mce_notify_user()) { | 475 | if (mce_notify_user()) { |
474 | next_interval = max(next_interval/2, HZ/100); | 476 | *n = max(*n/2, HZ/100); |
475 | } else { | 477 | } else { |
476 | next_interval = min(next_interval * 2, | 478 | *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ)); |
477 | (int)round_jiffies_relative(check_interval*HZ)); | ||
478 | } | 479 | } |
479 | 480 | ||
480 | t->expires = jiffies + next_interval; | 481 | t->expires = jiffies + *n; |
481 | add_timer(t); | 482 | add_timer(t); |
482 | } | 483 | } |
483 | 484 | ||
@@ -632,14 +633,13 @@ static void mce_cpu_features(struct cpuinfo_x86 *c) | |||
632 | static void mce_init_timer(void) | 633 | static void mce_init_timer(void) |
633 | { | 634 | { |
634 | struct timer_list *t = &__get_cpu_var(mce_timer); | 635 | struct timer_list *t = &__get_cpu_var(mce_timer); |
636 | int *n = &__get_cpu_var(next_interval); | ||
635 | 637 | ||
636 | /* data race harmless because everyone sets to the same value */ | 638 | *n = check_interval * HZ; |
637 | if (!next_interval) | 639 | if (!*n) |
638 | next_interval = check_interval * HZ; | ||
639 | if (!next_interval) | ||
640 | return; | 640 | return; |
641 | setup_timer(t, mcheck_timer, smp_processor_id()); | 641 | setup_timer(t, mcheck_timer, smp_processor_id()); |
642 | t->expires = round_jiffies(jiffies + next_interval); | 642 | t->expires = round_jiffies(jiffies + *n); |
643 | add_timer(t); | 643 | add_timer(t); |
644 | } | 644 | } |
645 | 645 | ||
@@ -907,7 +907,6 @@ static void mce_cpu_restart(void *data) | |||
907 | /* Reinit MCEs after user configuration changes */ | 907 | /* Reinit MCEs after user configuration changes */ |
908 | static void mce_restart(void) | 908 | static void mce_restart(void) |
909 | { | 909 | { |
910 | next_interval = check_interval * HZ; | ||
911 | on_each_cpu(mce_cpu_restart, NULL, 1); | 910 | on_each_cpu(mce_cpu_restart, NULL, 1); |
912 | } | 911 | } |
913 | 912 | ||
@@ -1110,7 +1109,8 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, | |||
1110 | break; | 1109 | break; |
1111 | case CPU_DOWN_FAILED: | 1110 | case CPU_DOWN_FAILED: |
1112 | case CPU_DOWN_FAILED_FROZEN: | 1111 | case CPU_DOWN_FAILED_FROZEN: |
1113 | t->expires = round_jiffies(jiffies + next_interval); | 1112 | t->expires = round_jiffies(jiffies + |
1113 | __get_cpu_var(next_interval)); | ||
1114 | add_timer_on(t, cpu); | 1114 | add_timer_on(t, cpu); |
1115 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); | 1115 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); |
1116 | break; | 1116 | break; |