author     Ingo Molnar <mingo@kernel.org>    2012-10-21 12:14:02 -0400
committer  Ingo Molnar <mingo@kernel.org>    2012-10-21 12:14:02 -0400
commit     0acfd009be1f7eedb450dda1bec4c2a03d7c808b
tree       6bb2956e45fb582bdc9d3af5122daec61e531380 /kernel/time
parent     6f0c0580b70c89094b3422ba81118c7b959c7556
parent     94a571402012e0dfaa23bbbdd64d033f48477d86
Merge branch 'nohz/core' of git://github.com/fweisbec/linux-dynticks into timers/core
Pull uncontroversial cleanup/refactoring nohz patches from Frederic Weisbecker.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/time')

-rw-r--r--  kernel/time/tick-sched.c | 111
 1 file changed, 48 insertions(+), 63 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a40260885265..766d4c47a4a4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -98,6 +98,49 @@ static ktime_t tick_init_jiffy_update(void)
 	return period;
 }
 
+
+static void tick_sched_do_timer(ktime_t now)
+{
+	int cpu = smp_processor_id();
+
+#ifdef CONFIG_NO_HZ
+	/*
+	 * Check if the do_timer duty was dropped. We don't care about
+	 * concurrency: This happens only when the cpu in charge went
+	 * into a long sleep. If two cpus happen to assign themself to
+	 * this duty, then the jiffies update is still serialized by
+	 * xtime_lock.
+	 */
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
+		tick_do_timer_cpu = cpu;
+#endif
+
+	/* Check, if the jiffies need an update */
+	if (tick_do_timer_cpu == cpu)
+		tick_do_update_jiffies64(now);
+}
+
+static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
+{
+#ifdef CONFIG_NO_HZ
+	/*
+	 * When we are idle and the tick is stopped, we have to touch
+	 * the watchdog as we might not schedule for a really long
+	 * time. This happens on complete idle SMP systems while
+	 * waiting on the login prompt. We also increment the "start of
+	 * idle" jiffy stamp so the idle accounting adjustment we do
+	 * when we go busy again does not account too much ticks.
+	 */
+	if (ts->tick_stopped) {
+		touch_softlockup_watchdog();
+		if (is_idle_task(current))
+			ts->idle_jiffies++;
+	}
+#endif
+	update_process_times(user_mode(regs));
+	profile_tick(CPU_PROFILING);
+}
+
 /*
  * NOHZ - aka dynamic tick functionality
  */
@@ -648,40 +691,12 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	struct pt_regs *regs = get_irq_regs();
-	int cpu = smp_processor_id();
 	ktime_t now = ktime_get();
 
 	dev->next_event.tv64 = KTIME_MAX;
 
-	/*
-	 * Check if the do_timer duty was dropped. We don't care about
-	 * concurrency: This happens only when the cpu in charge went
-	 * into a long sleep. If two cpus happen to assign themself to
-	 * this duty, then the jiffies update is still serialized by
-	 * xtime_lock.
-	 */
-	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
-		tick_do_timer_cpu = cpu;
-
-	/* Check, if the jiffies need an update */
-	if (tick_do_timer_cpu == cpu)
-		tick_do_update_jiffies64(now);
-
-	/*
-	 * When we are idle and the tick is stopped, we have to touch
-	 * the watchdog as we might not schedule for a really long
-	 * time. This happens on complete idle SMP systems while
-	 * waiting on the login prompt. We also increment the "start
-	 * of idle" jiffy stamp so the idle accounting adjustment we
-	 * do when we go busy again does not account too much ticks.
-	 */
-	if (ts->tick_stopped) {
-		touch_softlockup_watchdog();
-		ts->idle_jiffies++;
-	}
-
-	update_process_times(user_mode(regs));
-	profile_tick(CPU_PROFILING);
+	tick_sched_do_timer(now);
+	tick_sched_handle(ts, regs);
 
 	while (tick_nohz_reprogram(ts, now)) {
 		now = ktime_get();
@@ -802,45 +817,15 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 		container_of(timer, struct tick_sched, sched_timer);
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
-	int cpu = smp_processor_id();
-
-#ifdef CONFIG_NO_HZ
-	/*
-	 * Check if the do_timer duty was dropped. We don't care about
-	 * concurrency: This happens only when the cpu in charge went
-	 * into a long sleep. If two cpus happen to assign themself to
-	 * this duty, then the jiffies update is still serialized by
-	 * xtime_lock.
-	 */
-	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
-		tick_do_timer_cpu = cpu;
-#endif
 
-	/* Check, if the jiffies need an update */
-	if (tick_do_timer_cpu == cpu)
-		tick_do_update_jiffies64(now);
+	tick_sched_do_timer(now);
 
 	/*
 	 * Do not call, when we are not in irq context and have
 	 * no valid regs pointer
 	 */
-	if (regs) {
-		/*
-		 * When we are idle and the tick is stopped, we have to touch
-		 * the watchdog as we might not schedule for a really long
-		 * time. This happens on complete idle SMP systems while
-		 * waiting on the login prompt. We also increment the "start of
-		 * idle" jiffy stamp so the idle accounting adjustment we do
-		 * when we go busy again does not account too much ticks.
-		 */
-		if (ts->tick_stopped) {
-			touch_softlockup_watchdog();
-			if (is_idle_task(current))
-				ts->idle_jiffies++;
-		}
-		update_process_times(user_mode(regs));
-		profile_tick(CPU_PROFILING);
-	}
+	if (regs)
+		tick_sched_handle(ts, regs);
 
 	hrtimer_forward(timer, now, tick_period);
 
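The shape of the cleanup above: both timer paths, the low-resolution tick_nohz_handler() and the high-resolution tick_sched_timer(), previously carried verbatim copies of the do_timer-duty takeover and the per-tick bookkeeping; the merge hoists those copies into the shared helpers tick_sched_do_timer() and tick_sched_handle(). A visible side effect of sharing the helper is that tick_nohz_handler() now also gets the is_idle_task(current) guard that only tick_sched_timer() applied before. Below is a minimal, self-contained user-space sketch of the same pattern, not kernel code; all names (sched_do_timer, nohz_handler, DO_TIMER_NONE, etc.) are illustrative stand-ins for the kernel symbols in the diff.

#include <stdio.h>
#include <stdbool.h>

#define DO_TIMER_NONE	(-1)	/* stand-in for TICK_DO_TIMER_NONE */

static int do_timer_cpu = DO_TIMER_NONE;

/* Shared helper #1: take over the jiffies-update duty if it was dropped. */
static void sched_do_timer(int cpu, long now)
{
	if (do_timer_cpu == DO_TIMER_NONE)
		do_timer_cpu = cpu;
	if (do_timer_cpu == cpu)
		printf("cpu%d: update jiffies at %ld\n", cpu, now);
}

/* Shared helper #2: bookkeeping performed on every tick. */
static void sched_handle(bool tick_stopped)
{
	if (tick_stopped)
		printf("touch watchdog, bump idle_jiffies\n");
	printf("update_process_times + profile_tick\n");
}

/* Former call site #1: the low-resolution nohz handler. */
static void nohz_handler(int cpu, long now)
{
	sched_do_timer(cpu, now);
	sched_handle(true);
}

/* Former call site #2: the high-resolution sched timer. */
static void sched_timer(int cpu, long now, bool have_regs)
{
	sched_do_timer(cpu, now);
	if (have_regs)	/* mirrors the "only with a valid regs pointer" check */
		sched_handle(false);
}

int main(void)
{
	nohz_handler(0, 1000);
	sched_timer(1, 1001, true);
	return 0;
}

The payoff of the factoring is the usual one: a later fix to either duty, such as the idle-task check, lands in one helper and both interrupt paths inherit it, instead of the two copies drifting apart again.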