Diffstat (limited to 'arch/x86/kernel/apb_timer.c')
-rw-r--r--  arch/x86/kernel/apb_timer.c  60
1 file changed, 4 insertions(+), 56 deletions(-)
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 51d4e166306..1293c709ee8 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -508,64 +508,12 @@ static int apbt_next_event(unsigned long delta,
         return 0;
 }
 
-/*
- * APB timer clock is not in sync with pclk on Langwell, which translates to
- * unreliable read value caused by sampling error. the error does not add up
- * overtime and only happens when sampling a 0 as a 1 by mistake. so the time
- * would go backwards. the following code is trying to prevent time traveling
- * backwards. little bit paranoid.
- */
 static cycle_t apbt_read_clocksource(struct clocksource *cs)
 {
-        unsigned long t0, t1, t2;
-        static unsigned long last_read;
-
-bad_count:
-        t1 = apbt_readl(phy_cs_timer_id,
-                        APBTMR_N_CURRENT_VALUE);
-        t2 = apbt_readl(phy_cs_timer_id,
-                        APBTMR_N_CURRENT_VALUE);
-        if (unlikely(t1 < t2)) {
-                pr_debug("APBT: read current count error %lx:%lx:%lx\n",
-                         t1, t2, t2 - t1);
-                goto bad_count;
-        }
-        /*
-         * check against cached last read, makes sure time does not go back.
-         * it could be a normal rollover but we will do tripple check anyway
-         */
-        if (unlikely(t2 > last_read)) {
-                /* check if we have a normal rollover */
-                unsigned long raw_intr_status =
-                        apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
-                /*
-                 * cs timer interrupt is masked but raw intr bit is set if
-                 * rollover occurs. then we read EOI reg to clear it.
-                 */
-                if (raw_intr_status & (1 << phy_cs_timer_id)) {
-                        apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
-                        goto out;
-                }
-                pr_debug("APB CS going back %lx:%lx:%lx ",
-                         t2, last_read, t2 - last_read);
-bad_count_x3:
-                pr_debug("triple check enforced\n");
-                t0 = apbt_readl(phy_cs_timer_id,
-                                APBTMR_N_CURRENT_VALUE);
-                udelay(1);
-                t1 = apbt_readl(phy_cs_timer_id,
-                                APBTMR_N_CURRENT_VALUE);
-                udelay(1);
-                t2 = apbt_readl(phy_cs_timer_id,
-                                APBTMR_N_CURRENT_VALUE);
-                if ((t2 > t1) || (t1 > t0)) {
-                        printk(KERN_ERR "Error: APB CS tripple check failed\n");
-                        goto bad_count_x3;
-                }
-        }
-out:
-        last_read = t2;
-        return (cycle_t)~t2;
+        unsigned long current_count;
+
+        current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);
+        return (cycle_t)~current_count;
 }
 
 static int apbt_clocksource_register(void)
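
The APB timer is a down-counter, so the read path inverts the raw register value to hand the clocksource a monotonically increasing count; that is what the new return (cycle_t)~current_count does. Below is a minimal, self-contained sketch of that inversion outside the kernel, using a hypothetical stand-in for apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE); it is an illustration of the technique, not driver code.

/* Sketch only: shows why bitwise-inverting a 32-bit down-counter yields an
 * increasing clocksource value, mirroring the simplified
 * apbt_read_clocksource() above. The register read is faked here. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cycle_t;

static uint32_t fake_apbt_current_value(void)
{
        /* stand-in for apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE) */
        static uint32_t count = 0xffffff00u;
        count -= 0x40;                  /* hardware decrements toward zero */
        return count;
}

static cycle_t read_clocksource(void)
{
        uint32_t current_count = fake_apbt_current_value();
        /* bitwise NOT turns the down-count into an up-count */
        return (cycle_t)(uint32_t)~current_count;
}

int main(void)
{
        for (int i = 0; i < 3; i++)
                printf("clocksource value: %llu\n",
                       (unsigned long long)read_clocksource());
        return 0;
}

Successive calls print 319, 383, 447 as the fake counter falls, which is the monotonic behaviour the clocksource core expects from read().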