Diffstat (limited to 'kernel/time/tick-sched.c')
 kernel/time/tick-sched.c | 137
 1 file changed, 63 insertions(+), 74 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a40260885265..d58e552d9fd1 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -31,7 +31,7 @@
 static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
 /*
- * The time, when the last jiffy update happened. Protected by xtime_lock.
+ * The time, when the last jiffy update happened. Protected by jiffies_lock.
  */
 static ktime_t last_jiffies_update;
 
@@ -49,14 +49,14 @@ static void tick_do_update_jiffies64(ktime_t now)
 	ktime_t delta;
 
 	/*
-	 * Do a quick check without holding xtime_lock:
+	 * Do a quick check without holding jiffies_lock:
 	 */
 	delta = ktime_sub(now, last_jiffies_update);
 	if (delta.tv64 < tick_period.tv64)
 		return;
 
-	/* Reevalute with xtime_lock held */
-	write_seqlock(&xtime_lock);
+	/* Reevalute with jiffies_lock held */
+	write_seqlock(&jiffies_lock);
 
 	delta = ktime_sub(now, last_jiffies_update);
 	if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +79,7 @@ static void tick_do_update_jiffies64(ktime_t now)
 		/* Keep the tick_next_period variable up to date */
 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
-	write_sequnlock(&xtime_lock);
+	write_sequnlock(&jiffies_lock);
 }
 
 /*
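The update path above keeps its structure through the rename: an unlocked fast-path check against tick_period, then a re-check under the seqlock, so most callers never touch jiffies_lock at all. A minimal userspace sketch of this check-then-lock pattern (hypothetical names cached_stamp, period_ns, stamp_lock; not kernel code — a production version would also use an atomic load for the unlocked read):

#include <pthread.h>
#include <stdint.h>

static int64_t cached_stamp;               /* plays last_jiffies_update */
static const int64_t period_ns = 1000000;  /* plays tick_period (1 ms)  */
static pthread_mutex_t stamp_lock = PTHREAD_MUTEX_INITIALIZER;

static void maybe_update(int64_t now)
{
	/* Racy fast path, like the unlocked delta check above. */
	if (now - cached_stamp < period_ns)
		return;

	/* Slow path: take the lock and re-check, since another
	 * thread may have advanced the stamp in the meantime. */
	pthread_mutex_lock(&stamp_lock);
	if (now - cached_stamp >= period_ns)
		cached_stamp = now;
	pthread_mutex_unlock(&stamp_lock);
}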
@@ -89,15 +89,58 @@ static ktime_t tick_init_jiffy_update(void)
 {
 	ktime_t period;
 
-	write_seqlock(&xtime_lock);
+	write_seqlock(&jiffies_lock);
 	/* Did we start the jiffies update yet ? */
 	if (last_jiffies_update.tv64 == 0)
 		last_jiffies_update = tick_next_period;
 	period = last_jiffies_update;
-	write_sequnlock(&xtime_lock);
+	write_sequnlock(&jiffies_lock);
 	return period;
 }
 
+
+static void tick_sched_do_timer(ktime_t now)
+{
+	int cpu = smp_processor_id();
+
+#ifdef CONFIG_NO_HZ
+	/*
+	 * Check if the do_timer duty was dropped. We don't care about
+	 * concurrency: This happens only when the cpu in charge went
+	 * into a long sleep. If two cpus happen to assign themself to
+	 * this duty, then the jiffies update is still serialized by
+	 * jiffies_lock.
+	 */
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
+		tick_do_timer_cpu = cpu;
+#endif
+
+	/* Check, if the jiffies need an update */
+	if (tick_do_timer_cpu == cpu)
+		tick_do_update_jiffies64(now);
+}
+
+static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
+{
+#ifdef CONFIG_NO_HZ
+	/*
+	 * When we are idle and the tick is stopped, we have to touch
+	 * the watchdog as we might not schedule for a really long
+	 * time. This happens on complete idle SMP systems while
+	 * waiting on the login prompt. We also increment the "start of
+	 * idle" jiffy stamp so the idle accounting adjustment we do
+	 * when we go busy again does not account too much ticks.
+	 */
+	if (ts->tick_stopped) {
+		touch_softlockup_watchdog();
+		if (is_idle_task(current))
+			ts->idle_jiffies++;
+	}
+#endif
+	update_process_times(user_mode(regs));
+	profile_tick(CPU_PROFILING);
+}
+
 /*
  * NOHZ - aka dynamic tick functionality
  */
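These two helpers absorb logic that, as the later hunks show, was previously duplicated in tick_nohz_handler() and tick_sched_timer(). Both handlers reduce to the same core; a condensed sketch of that shared shape (tick_handler_core is a hypothetical name — the real functions also do their mode-specific reprogramming around it):

static void tick_handler_core(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(now);		/* jiffies/do_timer housekeeping */
	if (regs)				/* only from real irq context   */
		tick_sched_handle(ts, regs);	/* per-CPU tick work            */
}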
@@ -282,11 +325,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&jiffies_lock);
 		last_update = last_jiffies_update;
 		last_jiffies = jiffies;
 		time_delta = timekeeping_max_deferment();
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&jiffies_lock, seq));
 
 	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
 	    arch_needs_cpu(cpu)) {
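The read side pairs read_seqbegin() with read_seqretry() and retries until it gets a snapshot no writer raced with. A self-contained toy version of that read loop using C11 atomics (sequence counter only; hypothetical names seq, a, b — the matching writer would bump seq to odd before and back to even after updating a and b, with release ordering):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic unsigned seq;	/* even = quiescent, odd = writer active */
static _Atomic int64_t a, b;	/* stand-ins for the protected data      */

/* Snapshot a and b consistently, retrying like the do/while
 * loop over jiffies_lock above. */
static void read_pair(int64_t *x, int64_t *y)
{
	unsigned s;

	do {
		/* Wait out an in-progress writer (odd sequence). */
		while ((s = atomic_load_explicit(&seq,
				memory_order_acquire)) & 1)
			;
		*x = atomic_load_explicit(&a, memory_order_relaxed);
		*y = atomic_load_explicit(&b, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		/* If the sequence moved, a writer raced us: retry. */
	} while (atomic_load_explicit(&seq, memory_order_relaxed) != s);
}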
@@ -526,6 +569,8 @@ void tick_nohz_irq_exit(void)
 	if (!ts->inidle)
 		return;
 
+	/* Cancel the timer because CPU already waken up from the C-states*/
+	menu_hrtimer_cancel();
 	__tick_nohz_idle_enter(ts);
 }
 
@@ -621,6 +666,8 @@ void tick_nohz_idle_exit(void)
 
 	ts->inidle = 0;
 
+	/* Cancel the timer because CPU already waken up from the C-states*/
+	menu_hrtimer_cancel();
 	if (ts->idle_active || ts->tick_stopped)
 		now = ktime_get();
 
@@ -648,40 +695,12 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	struct pt_regs *regs = get_irq_regs();
-	int cpu = smp_processor_id();
 	ktime_t now = ktime_get();
 
 	dev->next_event.tv64 = KTIME_MAX;
 
-	/*
-	 * Check if the do_timer duty was dropped. We don't care about
-	 * concurrency: This happens only when the cpu in charge went
-	 * into a long sleep. If two cpus happen to assign themself to
-	 * this duty, then the jiffies update is still serialized by
-	 * xtime_lock.
-	 */
-	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
-		tick_do_timer_cpu = cpu;
-
-	/* Check, if the jiffies need an update */
-	if (tick_do_timer_cpu == cpu)
-		tick_do_update_jiffies64(now);
-
-	/*
-	 * When we are idle and the tick is stopped, we have to touch
-	 * the watchdog as we might not schedule for a really long
-	 * time. This happens on complete idle SMP systems while
-	 * waiting on the login prompt. We also increment the "start
-	 * of idle" jiffy stamp so the idle accounting adjustment we
-	 * do when we go busy again does not account too much ticks.
-	 */
-	if (ts->tick_stopped) {
-		touch_softlockup_watchdog();
-		ts->idle_jiffies++;
-	}
-
-	update_process_times(user_mode(regs));
-	profile_tick(CPU_PROFILING);
+	tick_sched_do_timer(now);
+	tick_sched_handle(ts, regs);
 
 	while (tick_nohz_reprogram(ts, now)) {
 		now = ktime_get();
@@ -794,7 +813,7 @@ void tick_check_idle(int cpu)
 #ifdef CONFIG_HIGH_RES_TIMERS
 /*
  * We rearm the timer until we get disabled by the idle code.
- * Called with interrupts disabled and timer->base->cpu_base->lock held.
+ * Called with interrupts disabled.
  */
 static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 {
@@ -802,45 +821,15 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 		container_of(timer, struct tick_sched, sched_timer);
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
-	int cpu = smp_processor_id();
 
-#ifdef CONFIG_NO_HZ
-	/*
-	 * Check if the do_timer duty was dropped. We don't care about
-	 * concurrency: This happens only when the cpu in charge went
-	 * into a long sleep. If two cpus happen to assign themself to
-	 * this duty, then the jiffies update is still serialized by
-	 * xtime_lock.
-	 */
-	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
-		tick_do_timer_cpu = cpu;
-#endif
-
-	/* Check, if the jiffies need an update */
-	if (tick_do_timer_cpu == cpu)
-		tick_do_update_jiffies64(now);
+	tick_sched_do_timer(now);
 
 	/*
 	 * Do not call, when we are not in irq context and have
 	 * no valid regs pointer
 	 */
-	if (regs) {
-		/*
-		 * When we are idle and the tick is stopped, we have to touch
-		 * the watchdog as we might not schedule for a really long
-		 * time. This happens on complete idle SMP systems while
-		 * waiting on the login prompt. We also increment the "start of
-		 * idle" jiffy stamp so the idle accounting adjustment we do
-		 * when we go busy again does not account too much ticks.
-		 */
-		if (ts->tick_stopped) {
-			touch_softlockup_watchdog();
-			if (is_idle_task(current))
-				ts->idle_jiffies++;
-		}
-		update_process_times(user_mode(regs));
-		profile_tick(CPU_PROFILING);
-	}
+	if (regs)
+		tick_sched_handle(ts, regs);
 
 	hrtimer_forward(timer, now, tick_period);
 
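For context, tick_sched_timer() is a periodic hrtimer callback: it does its per-tick work, pushes its own expiry forward by tick_period, and asks the core to restart it. The general shape of such a callback, as a kernel-style sketch (my_period and my_periodic_cb are hypothetical names, not the actual tick code):

static ktime_t my_period;	/* hypothetical, set at timer setup */

static enum hrtimer_restart my_periodic_cb(struct hrtimer *timer)
{
	ktime_t now = ktime_get();

	/* ... per-tick work goes here ... */

	/* Advance the expiry past 'now' in whole periods; the return
	 * value (the number of missed periods) is ignored here. */
	hrtimer_forward(timer, now, my_period);
	return HRTIMER_RESTART;	/* re-arm at the new expiry */
}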
@@ -874,7 +863,7 @@ void tick_setup_sched_timer(void)
 	/* Get the next period (per cpu) */
 	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 
-	/* Offset the tick to avert xtime_lock contention. */
+	/* Offset the tick to avert jiffies_lock contention. */
 	if (sched_skew_tick) {
 		u64 offset = ktime_to_ns(tick_period) >> 1;
 		do_div(offset, num_possible_cpus());
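The skew spreads the per-CPU sched_timer expiries so they don't all fire, and contend on jiffies_lock, at the same instant: half a tick period is divided among the possible CPUs, and the source then scales that base offset by the CPU number. A small standalone C program mirroring the arithmetic, assuming HZ=250 (a 4 ms tick_period) and 4 possible CPUs:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t period_ns = 4000000;		/* assumed: HZ=250 tick period */
	uint64_t offset = period_ns >> 1;	/* spread over half a period   */
	unsigned ncpus = 4, cpu;		/* assumed: num_possible_cpus() */

	offset /= ncpus;			/* the do_div() step above */
	for (cpu = 0; cpu < ncpus; cpu++)	/* per-CPU scaling by id   */
		printf("cpu%u: +%llu ns\n",
		       cpu, (unsigned long long)(cpu * offset));
	return 0;
}

With these assumptions it prints offsets of 0, 500000, 1000000 and 1500000 ns for cpu0 through cpu3, i.e. the ticks are staggered in 0.5 ms steps.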