author     Thomas Gleixner <tglx@linutronix.de>      2012-05-24 13:54:51 -0400
committer  Tony Luck <tony.luck@intel.com>           2012-05-30 17:40:01 -0400
commit     82f7af09e6fb58fb725c850d725d5e8780a9bec2 (patch)
tree       3dd5ca6521c8042be3227a6ec51c485caf4035af
parent     37c3459b67dd5a396a968e819cf4a86d24ac9ace (diff)
x86/mce: Cleanup timer mess
Use unsigned long for dealing with jiffies, not int. Rename the callback
to something sensible. Use __this_cpu_read/write for accessing per-cpu
data.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Borislav Petkov <borislav.petkov@amd.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 5f793e6c854b..98003bfc5556 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1243,15 +1243,15 @@ void mce_log_therm_throt_event(__u64 status)
  * poller finds an MCE, poll 2x faster. When the poller finds no more
  * errors, poll 2x slower (up to check_interval seconds).
  */
-static int check_interval = 5 * 60; /* 5 minutes */
+static unsigned long check_interval = 5 * 60; /* 5 minutes */
 
-static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
+static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
-static void mce_start_timer(unsigned long data)
+static void mce_timer_fn(unsigned long data)
 {
-	struct timer_list *t = &per_cpu(mce_timer, data);
-	int *n;
+	struct timer_list *t = &__get_cpu_var(mce_timer);
+	unsigned long iv;
 
 	WARN_ON(smp_processor_id() != data);
 
@@ -1264,13 +1264,14 @@ static void mce_start_timer(unsigned long data)
 	 * Alert userspace if needed. If we logged an MCE, reduce the
 	 * polling interval, otherwise increase the polling interval.
 	 */
-	n = &__get_cpu_var(mce_next_interval);
+	iv = __this_cpu_read(mce_next_interval);
 	if (mce_notify_irq())
-		*n = max(*n/2, HZ/100);
+		iv = max(iv, (unsigned long) HZ/100);
 	else
-		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
+		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
+	__this_cpu_write(mce_next_interval, iv);
 
-	t->expires = jiffies + *n;
+	t->expires = jiffies + iv;
 	add_timer_on(t, smp_processor_id());
 }
 
@@ -1511,17 +1512,17 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 static void __mcheck_cpu_init_timer(void)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
-	int *n = &__get_cpu_var(mce_next_interval);
+	unsigned long iv = __this_cpu_read(mce_next_interval);
 
-	setup_timer(t, mce_start_timer, smp_processor_id());
+	setup_timer(t, mce_timer_fn, smp_processor_id());
 
 	if (mce_ignore_ce)
 		return;
 
-	*n = check_interval * HZ;
-	if (!*n)
+	__this_cpu_write(mce_next_interval, iv);
+	if (!iv)
 		return;
-	t->expires = round_jiffies(jiffies + *n);
+	t->expires = round_jiffies(jiffies + iv);
 	add_timer_on(t, smp_processor_id());
 }
 
@@ -2231,7 +2232,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DOWN_FAILED_FROZEN:
 		if (!mce_ignore_ce && check_interval) {
 			t->expires = round_jiffies(jiffies +
-					__get_cpu_var(mce_next_interval));
+					per_cpu(mce_next_interval, cpu));
 			add_timer_on(t, cpu);
 		}
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
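
For readers unfamiliar with the accessors the patch switches to, below is a minimal, standalone sketch of the same per-CPU timer pattern. The demo_* names are illustrative only and not part of the patch. It keeps the interval in jiffies in an unsigned long per-CPU variable, manipulates it with __this_cpu_read()/__this_cpu_write() instead of holding a pointer obtained from __get_cpu_var(), and re-arms the timer on the local CPU with add_timer_on(). It assumes the 2012-era timer API, where setup_timer() callbacks take an unsigned long argument.

/*
 * Minimal sketch of the per-CPU timer pattern used above.
 * The demo_* names are illustrative only.
 */
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/percpu.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

static unsigned long demo_interval = 5 * 60;	/* seconds */

static DEFINE_PER_CPU(unsigned long, demo_next_interval);	/* in jiffies */
static DEFINE_PER_CPU(struct timer_list, demo_timer);

/* Timer callback; runs on the CPU the timer was armed on. */
static void demo_timer_fn(unsigned long data)
{
	struct timer_list *t = &__get_cpu_var(demo_timer);
	unsigned long iv;

	WARN_ON(smp_processor_id() != data);

	/* Read, adjust and write back the per-CPU interval. */
	iv = __this_cpu_read(demo_next_interval);
	iv = min(iv * 2, round_jiffies_relative(demo_interval * HZ));
	__this_cpu_write(demo_next_interval, iv);

	/* Re-arm on the local CPU. */
	t->expires = jiffies + iv;
	add_timer_on(t, smp_processor_id());
}

/* Per-CPU setup, called on the CPU that owns the timer. */
static void demo_init_timer(void)
{
	struct timer_list *t = &__get_cpu_var(demo_timer);

	__this_cpu_write(demo_next_interval, demo_interval * HZ);
	setup_timer(t, demo_timer_fn, smp_processor_id());
	t->expires = round_jiffies(jiffies + __this_cpu_read(demo_next_interval));
	add_timer_on(t, smp_processor_id());
}

The appeal of __this_cpu_read()/__this_cpu_write(), as the commit message notes, is that they operate on the local CPU's instance directly: the arithmetic happens on a plain local variable of the right width (unsigned long, matching jiffies) instead of through an int pointer into per-CPU storage.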