-rw-r--r--  arch/x86/kernel/apb_timer.c | 60 ++++--------------------------------------------------------
1 file changed, 4 insertions(+), 56 deletions(-)
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 7c9ab59653e8..afc406498c9d 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -506,64 +506,12 @@ static int apbt_next_event(unsigned long delta,
 	return 0;
 }
 
-/*
- * APB timer clock is not in sync with pclk on Langwell, which translates to
- * unreliable read value caused by sampling error. the error does not add up
- * overtime and only happens when sampling a 0 as a 1 by mistake. so the time
- * would go backwards. the following code is trying to prevent time traveling
- * backwards. little bit paranoid.
- */
 static cycle_t apbt_read_clocksource(struct clocksource *cs)
 {
-	unsigned long t0, t1, t2;
-	static unsigned long last_read;
-
-bad_count:
-	t1 = apbt_readl(phy_cs_timer_id,
-			APBTMR_N_CURRENT_VALUE);
-	t2 = apbt_readl(phy_cs_timer_id,
-			APBTMR_N_CURRENT_VALUE);
-	if (unlikely(t1 < t2)) {
-		pr_debug("APBT: read current count error %lx:%lx:%lx\n",
-			 t1, t2, t2 - t1);
-		goto bad_count;
-	}
-	/*
-	 * check against cached last read, makes sure time does not go back.
-	 * it could be a normal rollover but we will do tripple check anyway
-	 */
-	if (unlikely(t2 > last_read)) {
-		/* check if we have a normal rollover */
-		unsigned long raw_intr_status =
-			apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
-		/*
-		 * cs timer interrupt is masked but raw intr bit is set if
-		 * rollover occurs. then we read EOI reg to clear it.
-		 */
-		if (raw_intr_status & (1 << phy_cs_timer_id)) {
-			apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
-			goto out;
-		}
-		pr_debug("APB CS going back %lx:%lx:%lx ",
-			 t2, last_read, t2 - last_read);
-bad_count_x3:
-		pr_debug("triple check enforced\n");
-		t0 = apbt_readl(phy_cs_timer_id,
-				APBTMR_N_CURRENT_VALUE);
-		udelay(1);
-		t1 = apbt_readl(phy_cs_timer_id,
-				APBTMR_N_CURRENT_VALUE);
-		udelay(1);
-		t2 = apbt_readl(phy_cs_timer_id,
-				APBTMR_N_CURRENT_VALUE);
-		if ((t2 > t1) || (t1 > t0)) {
-			printk(KERN_ERR "Error: APB CS tripple check failed\n");
-			goto bad_count_x3;
-		}
-	}
-out:
-	last_read = t2;
-	return (cycle_t)~t2;
+	unsigned long current_count;
+
+	current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);
+	return (cycle_t)~current_count;
 }
 
 static int apbt_clocksource_register(void)
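A note on the surviving read path: the APB timer counts down (the removed sanity check treated t1 < t2, i.e. a second read larger than the first, as an error), while the clocksource core expects a cycle value that increases over time. Returning ~current_count converts the down-count into an up-count. Below is a minimal userspace sketch of that conversion; fake_timer and read_current_value() are made-up stand-ins for the memory-mapped APBTMR_N_CURRENT_VALUE register, not kernel API:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the APB timer's current-value register:
 * a free-running 32-bit down-counter. */
static uint32_t fake_timer = 0xfffffff0u;

static uint32_t read_current_value(void)
{
	fake_timer -= 4;	/* the counter decrements as time passes */
	return fake_timer;
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		uint32_t raw = read_current_value();
		/* As in apbt_read_clocksource(): invert the down-count so
		 * the value handed to the clocksource core counts up. */
		uint32_t cycles = ~raw;
		printf("raw=%08x cycles=%08x\n", (unsigned)raw, (unsigned)cycles);
	}
	return 0;
}

Each iteration prints a smaller raw count and a larger cycles value, which is the monotonic behaviour the clocksource layer relies on; the old triple-read dance existed only because sampling errors on Langwell could momentarily violate that monotonicity.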