aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorjacob.jun.pan@linux.intel.com <jacob.jun.pan@linux.intel.com>2011-02-18 16:42:54 -0500
committerThomas Gleixner <tglx@linutronix.de>2011-02-18 17:14:54 -0500
commit5df91509d324d44cfb11e55d9cb02fe18b53b045 (patch)
tree3076ee0783e0228bffd58011ddc8d3396a15bd62
parent13884c6680973f0ce3483dc59b636b4962d6dafe (diff)
x86: mrst: Remove apb timer read workaround
APB timer current count was unreliable in the earlier silicon, which could result in time going backwards. This problem has been fixed in the current silicon stepping. This patch removes the workaround which was used to check and prevent timer rolling back when APB timer is used as clocksource device. The workaround code was also flawed by potential race condition around the cached read value last_read. Though a fix can be done by assigning last_read to a local variable at the beginning of apbt_read_clocksource(), but this is not necessary anymore. [ tglx: A sane timer on an Intel chip - I can't believe it ] Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com> Cc: Arjan van de Ven <arjan@linux.intel.com> Cc: Alan Cox <alan@linux.intel.com> LKML-Reference: <1298065374-25532-1-git-send-email-jacob.jun.pan@linux.intel.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--arch/x86/kernel/apb_timer.c60
1 file changed, 4 insertions, 56 deletions
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 7c9ab59653e8..afc406498c9d 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -506,64 +506,12 @@ static int apbt_next_event(unsigned long delta,
506 return 0; 506 return 0;
507} 507}
508 508
509/*
510 * APB timer clock is not in sync with pclk on Langwell, which translates to
511 * unreliable read value caused by sampling error. the error does not add up
512 * overtime and only happens when sampling a 0 as a 1 by mistake. so the time
513 * would go backwards. the following code is trying to prevent time traveling
514 * backwards. little bit paranoid.
515 */
516static cycle_t apbt_read_clocksource(struct clocksource *cs) 509static cycle_t apbt_read_clocksource(struct clocksource *cs)
517{ 510{
518 unsigned long t0, t1, t2; 511 unsigned long current_count;
519 static unsigned long last_read; 512
520 513 current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);
521bad_count: 514 return (cycle_t)~current_count;
522 t1 = apbt_readl(phy_cs_timer_id,
523 APBTMR_N_CURRENT_VALUE);
524 t2 = apbt_readl(phy_cs_timer_id,
525 APBTMR_N_CURRENT_VALUE);
526 if (unlikely(t1 < t2)) {
527 pr_debug("APBT: read current count error %lx:%lx:%lx\n",
528 t1, t2, t2 - t1);
529 goto bad_count;
530 }
531 /*
532 * check against cached last read, makes sure time does not go back.
533 * it could be a normal rollover but we will do tripple check anyway
534 */
535 if (unlikely(t2 > last_read)) {
536 /* check if we have a normal rollover */
537 unsigned long raw_intr_status =
538 apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
539 /*
540 * cs timer interrupt is masked but raw intr bit is set if
541 * rollover occurs. then we read EOI reg to clear it.
542 */
543 if (raw_intr_status & (1 << phy_cs_timer_id)) {
544 apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
545 goto out;
546 }
547 pr_debug("APB CS going back %lx:%lx:%lx ",
548 t2, last_read, t2 - last_read);
549bad_count_x3:
550 pr_debug("triple check enforced\n");
551 t0 = apbt_readl(phy_cs_timer_id,
552 APBTMR_N_CURRENT_VALUE);
553 udelay(1);
554 t1 = apbt_readl(phy_cs_timer_id,
555 APBTMR_N_CURRENT_VALUE);
556 udelay(1);
557 t2 = apbt_readl(phy_cs_timer_id,
558 APBTMR_N_CURRENT_VALUE);
559 if ((t2 > t1) || (t1 > t0)) {
560 printk(KERN_ERR "Error: APB CS tripple check failed\n");
561 goto bad_count_x3;
562 }
563 }
564out:
565 last_read = t2;
566 return (cycle_t)~t2;
567} 515}
568 516
569static int apbt_clocksource_register(void) 517static int apbt_clocksource_register(void)