about summary refs log tree commit diff stats
path: root/kernel/time/tick-sched.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/time/tick-sched.c')
-rw-r--r--  kernel/time/tick-sched.c  |  55
1 file changed, 22 insertions(+), 33 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 360674c485f5..68a873af09a8 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -120,6 +120,25 @@ static void tick_sched_do_timer(ktime_t now)
120 tick_do_update_jiffies64(now); 120 tick_do_update_jiffies64(now);
121} 121}
122 122
123static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
124{
125 /*
126 * When we are idle and the tick is stopped, we have to touch
127 * the watchdog as we might not schedule for a really long
128 * time. This happens on complete idle SMP systems while
129 * waiting on the login prompt. We also increment the "start of
130 * idle" jiffy stamp so the idle accounting adjustment we do
131 * when we go busy again does not account too much ticks.
132 */
133 if (ts->tick_stopped) {
134 touch_softlockup_watchdog();
135 if (is_idle_task(current))
136 ts->idle_jiffies++;
137 }
138 update_process_times(user_mode(regs));
139 profile_tick(CPU_PROFILING);
140}
141
123/* 142/*
124 * NOHZ - aka dynamic tick functionality 143 * NOHZ - aka dynamic tick functionality
125 */ 144 */
@@ -675,22 +694,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
675 dev->next_event.tv64 = KTIME_MAX; 694 dev->next_event.tv64 = KTIME_MAX;
676 695
677 tick_sched_do_timer(now); 696 tick_sched_do_timer(now);
678 697 tick_sched_handle(ts, regs);
679 /*
680 * When we are idle and the tick is stopped, we have to touch
681 * the watchdog as we might not schedule for a really long
682 * time. This happens on complete idle SMP systems while
683 * waiting on the login prompt. We also increment the "start
684 * of idle" jiffy stamp so the idle accounting adjustment we
685 * do when we go busy again does not account too much ticks.
686 */
687 if (ts->tick_stopped) {
688 touch_softlockup_watchdog();
689 ts->idle_jiffies++;
690 }
691
692 update_process_times(user_mode(regs));
693 profile_tick(CPU_PROFILING);
694 698
695 while (tick_nohz_reprogram(ts, now)) { 699 while (tick_nohz_reprogram(ts, now)) {
696 now = ktime_get(); 700 now = ktime_get();
@@ -818,23 +822,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
818 * Do not call, when we are not in irq context and have 822 * Do not call, when we are not in irq context and have
819 * no valid regs pointer 823 * no valid regs pointer
820 */ 824 */
821 if (regs) { 825 if (regs)
822 /* 826 tick_sched_handle(ts, regs);
823 * When we are idle and the tick is stopped, we have to touch
824 * the watchdog as we might not schedule for a really long
825 * time. This happens on complete idle SMP systems while
826 * waiting on the login prompt. We also increment the "start of
827 * idle" jiffy stamp so the idle accounting adjustment we do
828 * when we go busy again does not account too much ticks.
829 */
830 if (ts->tick_stopped) {
831 touch_softlockup_watchdog();
832 if (is_idle_task(current))
833 ts->idle_jiffies++;
834 }
835 update_process_times(user_mode(regs));
836 profile_tick(CPU_PROFILING);
837 }
838 827
839 hrtimer_forward(timer, now, tick_period); 828 hrtimer_forward(timer, now, tick_period);
840 829