aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/time
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2012-10-14 20:03:27 -0400
committerFrederic Weisbecker <fweisbec@gmail.com>2012-10-15 12:35:11 -0400
commit5bb962269c29cbb878414cddf0ebdff8c5cdef0a (patch)
treeb6bb65f0ef0b268726c396def14ec2cfa963122b /kernel/time
parentddffeb8c4d0331609ef2581d84de4d763607bd37 (diff)
tick: Consolidate timekeeping handling code
Unify the duplicated timekeeping handling code of low and high res tick sched handlers.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Diffstat (limited to 'kernel/time')
-rw-r--r--kernel/time/tick-sched.c54
1 file changed, 24 insertions(+), 30 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a40260885265..360674c485f5 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -98,6 +98,28 @@ static ktime_t tick_init_jiffy_update(void)
98 return period; 98 return period;
99} 99}
100 100
101
102static void tick_sched_do_timer(ktime_t now)
103{
104 int cpu = smp_processor_id();
105
106#ifdef CONFIG_NO_HZ
107 /*
108 * Check if the do_timer duty was dropped. We don't care about
109 * concurrency: This happens only when the cpu in charge went
110 * into a long sleep. If two cpus happen to assign themself to
111 * this duty, then the jiffies update is still serialized by
112 * xtime_lock.
113 */
114 if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
115 tick_do_timer_cpu = cpu;
116#endif
117
118 /* Check, if the jiffies need an update */
119 if (tick_do_timer_cpu == cpu)
120 tick_do_update_jiffies64(now);
121}
122
101/* 123/*
102 * NOHZ - aka dynamic tick functionality 124 * NOHZ - aka dynamic tick functionality
103 */ 125 */
@@ -648,24 +670,11 @@ static void tick_nohz_handler(struct clock_event_device *dev)
648{ 670{
649 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 671 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
650 struct pt_regs *regs = get_irq_regs(); 672 struct pt_regs *regs = get_irq_regs();
651 int cpu = smp_processor_id();
652 ktime_t now = ktime_get(); 673 ktime_t now = ktime_get();
653 674
654 dev->next_event.tv64 = KTIME_MAX; 675 dev->next_event.tv64 = KTIME_MAX;
655 676
656 /* 677 tick_sched_do_timer(now);
657 * Check if the do_timer duty was dropped. We don't care about
658 * concurrency: This happens only when the cpu in charge went
659 * into a long sleep. If two cpus happen to assign themself to
660 * this duty, then the jiffies update is still serialized by
661 * xtime_lock.
662 */
663 if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
664 tick_do_timer_cpu = cpu;
665
666 /* Check, if the jiffies need an update */
667 if (tick_do_timer_cpu == cpu)
668 tick_do_update_jiffies64(now);
669 678
670 /* 679 /*
671 * When we are idle and the tick is stopped, we have to touch 680 * When we are idle and the tick is stopped, we have to touch
@@ -802,23 +811,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
802 container_of(timer, struct tick_sched, sched_timer); 811 container_of(timer, struct tick_sched, sched_timer);
803 struct pt_regs *regs = get_irq_regs(); 812 struct pt_regs *regs = get_irq_regs();
804 ktime_t now = ktime_get(); 813 ktime_t now = ktime_get();
805 int cpu = smp_processor_id();
806 814
807#ifdef CONFIG_NO_HZ 815 tick_sched_do_timer(now);
808 /*
809 * Check if the do_timer duty was dropped. We don't care about
810 * concurrency: This happens only when the cpu in charge went
811 * into a long sleep. If two cpus happen to assign themself to
812 * this duty, then the jiffies update is still serialized by
813 * xtime_lock.
814 */
815 if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
816 tick_do_timer_cpu = cpu;
817#endif
818
819 /* Check, if the jiffies need an update */
820 if (tick_do_timer_cpu == cpu)
821 tick_do_update_jiffies64(now);
822 816
823 /* 817 /*
824 * Do not call, when we are not in irq context and have 818 * Do not call, when we are not in irq context and have