Diffstat (limited to 'arch/x86/kernel/apb_timer.c')
 -rw-r--r--  arch/x86/kernel/apb_timer.c | 76
 1 file changed, 13 insertions(+), 63 deletions(-)
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 7c9ab59653e..1293c709ee8 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void)
 	memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
 
 	if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
-		apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
+		adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
 		global_clock_event = &adev->evt;
 		printk(KERN_DEBUG "%s clockevent registered as global\n",
 		       global_clock_event->name);
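Note on this hunk: apbt_clockevent is only a template, and the memcpy() two lines above has already copied it into the per-device adev->evt, so lowering the rating on the template afterwards never reached the structure that global_clock_event actually points at. A minimal sketch of the copy semantics (hypothetical struct and values, not the kernel types):

	#include <stdio.h>
	#include <string.h>

	struct evt { int rating; };

	int main(void)
	{
		struct evt template = { .rating = 250 };
		struct evt dev;

		memcpy(&dev, &template, sizeof(dev));	/* snapshot taken here */
		template.rating = 150;			/* too late: dev keeps 250 */
		printf("dev.rating = %d\n", dev.rating);
		return 0;
	}

Writing through adev->evt.rating, as the new line does, modifies the copy that is actually registered.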
@@ -313,14 +313,16 @@ static void apbt_setup_irq(struct apbt_dev *adev)
 	if (adev->irq == 0)
 		return;
 
+	irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
+	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
+	/* APB timer irqs are set up as mp_irqs, timer is edge type */
+	__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
+
 	if (system_state == SYSTEM_BOOTING) {
-		irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
-		irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
-		/* APB timer irqs are set up as mp_irqs, timer is edge type */
-		__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
 		if (request_irq(adev->irq, apbt_interrupt_handler,
-				IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
-				adev->name, adev)) {
+				IRQF_TIMER | IRQF_DISABLED |
+				IRQF_NOBALANCING,
+				adev->name, adev)) {
 			printk(KERN_ERR "Failed request IRQ for APBT%d\n",
 			       adev->num);
 		}
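Taken as a whole, this hunk moves the IRQ descriptor setup out of the SYSTEM_BOOTING branch so it runs on every invocation (for instance when a timer is set up again for a CPU brought online after boot), while request_irq() stays boot-only so the handler is installed exactly once. Reconstructed from the hunk alone (anything outside the hunk is elided, not invented), the function now reads approximately:

	static void apbt_setup_irq(struct apbt_dev *adev)
	{
		if (adev->irq == 0)
			return;

		/* descriptor setup now runs unconditionally */
		irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
		irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
		/* APB timer irqs are set up as mp_irqs, timer is edge type */
		__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");

		if (system_state == SYSTEM_BOOTING) {
			/* the handler itself is still requested only at boot */
			if (request_irq(adev->irq, apbt_interrupt_handler,
					IRQF_TIMER | IRQF_DISABLED |
					IRQF_NOBALANCING,
					adev->name, adev)) {
				printk(KERN_ERR "Failed request IRQ for APBT%d\n",
				       adev->num);
			}
		}
		/* ... remainder of the function lies outside this hunk ... */
	}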
@@ -506,64 +508,12 @@ static int apbt_next_event(unsigned long delta,
 	return 0;
 }
 
-/*
- * APB timer clock is not in sync with pclk on Langwell, which translates to
- * unreliable read value caused by sampling error. the error does not add up
- * overtime and only happens when sampling a 0 as a 1 by mistake. so the time
- * would go backwards. the following code is trying to prevent time traveling
- * backwards. little bit paranoid.
- */
 static cycle_t apbt_read_clocksource(struct clocksource *cs)
 {
-	unsigned long t0, t1, t2;
-	static unsigned long last_read;
-
-bad_count:
-	t1 = apbt_readl(phy_cs_timer_id,
-			APBTMR_N_CURRENT_VALUE);
-	t2 = apbt_readl(phy_cs_timer_id,
-			APBTMR_N_CURRENT_VALUE);
-	if (unlikely(t1 < t2)) {
-		pr_debug("APBT: read current count error %lx:%lx:%lx\n",
-			 t1, t2, t2 - t1);
-		goto bad_count;
-	}
-	/*
-	 * check against cached last read, makes sure time does not go back.
-	 * it could be a normal rollover but we will do tripple check anyway
-	 */
-	if (unlikely(t2 > last_read)) {
-		/* check if we have a normal rollover */
-		unsigned long raw_intr_status =
-			apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
-		/*
-		 * cs timer interrupt is masked but raw intr bit is set if
-		 * rollover occurs. then we read EOI reg to clear it.
-		 */
-		if (raw_intr_status & (1 << phy_cs_timer_id)) {
-			apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
-			goto out;
-		}
-		pr_debug("APB CS going back %lx:%lx:%lx ",
-			 t2, last_read, t2 - last_read);
-bad_count_x3:
-		pr_debug("triple check enforced\n");
-		t0 = apbt_readl(phy_cs_timer_id,
-				APBTMR_N_CURRENT_VALUE);
-		udelay(1);
-		t1 = apbt_readl(phy_cs_timer_id,
-				APBTMR_N_CURRENT_VALUE);
-		udelay(1);
-		t2 = apbt_readl(phy_cs_timer_id,
-				APBTMR_N_CURRENT_VALUE);
-		if ((t2 > t1) || (t1 > t0)) {
-			printk(KERN_ERR "Error: APB CS tripple check failed\n");
-			goto bad_count_x3;
-		}
-	}
-out:
-	last_read = t2;
-	return (cycle_t)~t2;
+	unsigned long current_count;
+
+	current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);
+	return (cycle_t)~current_count;
 }
 
 static int apbt_clocksource_register(void)
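The deleted block was a defensive multi-read workaround for the Langwell pclk sampling errors described in the removed comment; with it gone, a single register read suffices. The read path that remains relies on the APB timer being a down-counter while the clocksource core wants a monotonically increasing count: for a 32-bit value, ~x == 0xffffffff - x, so complementing the raw register value turns the countdown into an up-count, which is exactly what the retained return (cycle_t)~current_count does. A standalone illustration of the complement trick (hypothetical raw values in plain C, not a register access):

	#include <stdio.h>
	#include <stdint.h>

	/* complementing a 32-bit down-counter yields an up-counting value */
	static uint32_t to_upcount(uint32_t raw)
	{
		return ~raw;	/* == 0xffffffff - raw */
	}

	int main(void)
	{
		uint32_t earlier = 0x00001000;	/* down-counter, read first */
		uint32_t later   = 0x00000800;	/* same counter, read later */

		/* complemented readings increase as the counter decreases */
		printf("%u < %u\n", to_upcount(earlier), to_upcount(later));
		return 0;
	}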