path: root/arch/x86/kernel/apb_timer.c
author	Thomas Gleixner <tglx@linutronix.de>	2011-05-14 06:06:36 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2011-05-14 06:06:36 -0400
commit	a18f22a968de17b29f2310cdb7ba69163e65ec15 (patch)
tree	a7d56d88fad5e444d7661484109758a2f436129e /arch/x86/kernel/apb_timer.c
parent	a1c57e0fec53defe745e64417eacdbd3618c3e66 (diff)
parent	798778b8653f64b7b2162ac70eca10367cff6ce8 (diff)
Merge branch 'consolidate-clksrc-i8253' of master.kernel.org:~rmk/linux-2.6-arm into timers/clocksource
Conflicts:
	arch/ia64/kernel/cyclone.c
	arch/mips/kernel/i8253.c
	arch/x86/kernel/i8253.c

Reason: Resolve conflicts so further cleanups do not conflict further

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/apb_timer.c')
-rw-r--r--	arch/x86/kernel/apb_timer.c	64
1 file changed, 6 insertions(+), 58 deletions(-)
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 29ebf5a3b19..289e92862fd 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -283,7 +283,7 @@ static int __init apbt_clockevent_register(void)
 	memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
 
 	if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
-		apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
+		adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
 		global_clock_event = &adev->evt;
 		printk(KERN_DEBUG "%s clockevent registered as global\n",
 		       global_clock_event->name);
@@ -315,7 +315,7 @@ static void apbt_setup_irq(struct apbt_dev *adev)
 	irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
 	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
 	/* APB timer irqs are set up as mp_irqs, timer is edge type */
-	__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
+	__irq_set_handler(adev->irq, handle_edge_irq, 0, "edge");
 
 	if (system_state == SYSTEM_BOOTING) {
 		if (request_irq(adev->irq, apbt_interrupt_handler,
@@ -507,64 +507,12 @@ static int apbt_next_event(unsigned long delta,
 	return 0;
 }
 
-/*
- * APB timer clock is not in sync with pclk on Langwell, which translates to
- * unreliable read value caused by sampling error. the error does not add up
- * overtime and only happens when sampling a 0 as a 1 by mistake. so the time
- * would go backwards. the following code is trying to prevent time traveling
- * backwards. little bit paranoid.
- */
 static cycle_t apbt_read_clocksource(struct clocksource *cs)
 {
-	unsigned long t0, t1, t2;
-	static unsigned long last_read;
-
-bad_count:
-	t1 = apbt_readl(phy_cs_timer_id,
-			APBTMR_N_CURRENT_VALUE);
-	t2 = apbt_readl(phy_cs_timer_id,
-			APBTMR_N_CURRENT_VALUE);
-	if (unlikely(t1 < t2)) {
-		pr_debug("APBT: read current count error %lx:%lx:%lx\n",
-			 t1, t2, t2 - t1);
-		goto bad_count;
-	}
-	/*
-	 * check against cached last read, makes sure time does not go back.
-	 * it could be a normal rollover but we will do tripple check anyway
-	 */
-	if (unlikely(t2 > last_read)) {
-		/* check if we have a normal rollover */
-		unsigned long raw_intr_status =
-			apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
-		/*
-		 * cs timer interrupt is masked but raw intr bit is set if
-		 * rollover occurs. then we read EOI reg to clear it.
-		 */
-		if (raw_intr_status & (1 << phy_cs_timer_id)) {
-			apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
-			goto out;
-		}
-		pr_debug("APB CS going back %lx:%lx:%lx ",
-			 t2, last_read, t2 - last_read);
-bad_count_x3:
-		pr_debug("triple check enforced\n");
-		t0 = apbt_readl(phy_cs_timer_id,
-				APBTMR_N_CURRENT_VALUE);
-		udelay(1);
-		t1 = apbt_readl(phy_cs_timer_id,
-				APBTMR_N_CURRENT_VALUE);
-		udelay(1);
-		t2 = apbt_readl(phy_cs_timer_id,
-				APBTMR_N_CURRENT_VALUE);
-		if ((t2 > t1) || (t1 > t0)) {
-			printk(KERN_ERR "Error: APB CS tripple check failed\n");
-			goto bad_count_x3;
-		}
-	}
-out:
-	last_read = t2;
-	return (cycle_t)~t2;
+	unsigned long current_count;
+
+	current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);
+	return (cycle_t)~current_count;
 }
 
 static int apbt_clocksource_register(void)
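
Note on the simplified read path above: apbt_read_clocksource() returns the bitwise complement of the timer's CURRENT_VALUE register, which indicates the APB timer counts down; inverting the reading presents a monotonically increasing cycle value to the clocksource core. Below is a minimal standalone C sketch of that idea; read_hw_count() and the tick bookkeeping are hypothetical stand-ins for the driver's MMIO read, not kernel APIs.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cycle_t;

/*
 * Hypothetical stand-in for the MMIO read of the timer's current-value
 * register; the real driver uses
 * apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE).
 * The counter decrements, so more elapsed time means a smaller reading.
 */
static uint32_t read_hw_count(uint32_t ticks_elapsed)
{
	return ~0u - ticks_elapsed;
}

/*
 * Same shape as the simplified apbt_read_clocksource(): complement the
 * down-counting value so the clocksource layer sees time move forward.
 */
static cycle_t read_as_clocksource(uint32_t ticks_elapsed)
{
	uint32_t current_count = read_hw_count(ticks_elapsed);

	return (cycle_t)(uint32_t)~current_count;
}

int main(void)
{
	/* Later reads yield larger cycle values: prints 100 then 200. */
	printf("%llu\n", (unsigned long long)read_as_clocksource(100));
	printf("%llu\n", (unsigned long long)read_as_clocksource(200));
	return 0;
}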