about summary refs log tree commit diff stats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
authorJacob Pan <jacob.jun.pan@linux.intel.com>2010-03-03 00:01:34 -0500
committerH. Peter Anvin <hpa@zytor.com>2010-03-03 15:04:59 -0500
commit3010673ef5f7bef4b4685566a0713de1b4306c93 (patch)
tree227355682ffcb82f3286e17826ffbc97e67fc36c /arch/x86/kernel
parentd8111cd91abee016d62b401e057fee66ba80be67 (diff)
x86, mrst: Fix APB timer per cpu clockevent
The current APB timer code incorrectly registers a static copy of the
clockevent device for the boot CPU. The per cpu clockevent should be
used instead. This bug was hidden by zero-initialized data; as such it
did not get exposed in testing, but was discovered by code review.

Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
LKML-Reference: <1267592494-7723-1-git-send-email-jacob.jun.pan@linux.intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--arch/x86/kernel/apb_timer.c12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 83a345b0256c..6f27f8b75795 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -84,9 +84,10 @@ struct apbt_dev {
 
 int disable_apbt_percpu __cpuinitdata;
 
+static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);
+
 #ifdef CONFIG_SMP
 static unsigned int apbt_num_timers_used;
-static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);
 static struct apbt_dev *apbt_devs;
 #endif
 
@@ -302,6 +303,7 @@ static void apbt_disable_int(int n)
 static int __init apbt_clockevent_register(void)
 {
 	struct sfi_timer_table_entry *mtmr;
+	struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev);
 
 	mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
 	if (mtmr == NULL) {
@@ -329,22 +331,24 @@ static int __init apbt_clockevent_register(void)
 	 * global if not used for per cpu timer.
 	 */
 	apbt_clockevent.cpumask = cpumask_of(smp_processor_id());
+	adev->num = smp_processor_id();
+	memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
 
 	if (disable_apbt_percpu) {
 		apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
-		global_clock_event = &apbt_clockevent;
+		global_clock_event = &adev->evt;
 		printk(KERN_DEBUG "%s clockevent registered as global\n",
 		       global_clock_event->name);
 	}
 
 	if (request_irq(apbt_clockevent.irq, apbt_interrupt_handler,
 			IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
-			apbt_clockevent.name, &apbt_clockevent)) {
+			apbt_clockevent.name, adev)) {
 		printk(KERN_ERR "Failed request IRQ for APBT%d\n",
 		       apbt_clockevent.irq);
 	}
 
-	clockevents_register_device(&apbt_clockevent);
+	clockevents_register_device(&adev->evt);
 	/* Start APBT 0 interrupts */
 	apbt_enable_int(APBT_CLOCKEVENT0_NUM);
 