author		Ingo Molnar <mingo@kernel.org>	2013-04-21 05:05:47 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-04-21 05:05:47 -0400
commit		a166fcf04d848ffa09f0e831805553089f190cf4 (patch)
tree		1fc97c397238692375f1ebf7a39746188f6424db /kernel
parent		2727872dfe5d273f313f8a0c0dd0fcc58e96cde7 (diff)
parent		555347f6c080d2f25265f981c963605b4dd3610d (diff)
Merge branch 'timers/nohz-posix-timers-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/nohz
Pull posix cpu timers handling on full dynticks from Frederic Weisbecker.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
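
Background: a full dynticks (NO_HZ_FULL) CPU may stop its periodic tick, but posix cpu timers are expired from that tick. The pulled branch therefore adds both halves of the handshake: a predicate telling the tick-stop path whether posix cpu timers still need the tick (posix_cpu_timers_can_stop_tick()), and a kick path (posix_cpu_timer_kick_nohz() -> tick_nohz_full_kick_all()) that forces running full dynticks CPUs to re-evaluate when a timer gets armed. The in-tree consumer is not part of this merge; as a hypothetical sketch, the eventual tick-stop path would query the predicate roughly like this:

	static bool can_stop_full_tick(void)
	{
		WARN_ON_ONCE(!irqs_disabled());

		/* Keep the tick if posix cpu timers still rely on it. */
		if (!posix_cpu_timers_can_stop_tick(current))
			return false;

		return true;
	}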
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/posix-cpu-timers.c	76
-rw-r--r--	kernel/time/Kconfig		1
-rw-r--r--	kernel/time/tick-sched.c	51
3 files changed, 112 insertions, 16 deletions
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 8fd709c9bb58..84d5cb372ed5 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -10,6 +10,8 @@
 #include <linux/kernel_stat.h>
 #include <trace/events/timer.h>
 #include <linux/random.h>
+#include <linux/tick.h>
+#include <linux/workqueue.h>
 
 /*
  * Called after updating RLIMIT_CPU to run cpu timer and update
@@ -153,6 +155,21 @@ static void bump_cpu_timer(struct k_itimer *timer,
 	}
 }
 
+/**
+ * task_cputime_zero - Check a task_cputime struct for all zero fields.
+ *
+ * @cputime: The struct to compare.
+ *
+ * Checks @cputime to see if all fields are zero. Returns true if all fields
+ * are zero, false if any field is nonzero.
+ */
+static inline int task_cputime_zero(const struct task_cputime *cputime)
+{
+	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
+		return 1;
+	return 0;
+}
+
 static inline cputime_t prof_ticks(struct task_struct *p)
 {
 	cputime_t utime, stime;
@@ -636,6 +653,37 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
 	return 0;
 }
 
+#ifdef CONFIG_NO_HZ_FULL
+static void nohz_kick_work_fn(struct work_struct *work)
+{
+	tick_nohz_full_kick_all();
+}
+
+static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
+
+/*
+ * We need the IPIs to be sent from sane process context.
+ * The posix cpu timers are always set with irqs disabled.
+ */
+static void posix_cpu_timer_kick_nohz(void)
+{
+	schedule_work(&nohz_kick_work);
+}
+
+bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
+{
+	if (!task_cputime_zero(&tsk->cputime_expires))
+		return false;
+
+	if (tsk->signal->cputimer.running)
+		return false;
+
+	return true;
+}
+#else
+static inline void posix_cpu_timer_kick_nohz(void) { }
+#endif
+
 /*
  * Guts of sys_timer_settime for CPU timers.
  * This is called with the timer locked and interrupts disabled.
@@ -794,6 +842,8 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		sample_to_timespec(timer->it_clock,
 				   old_incr, &old->it_interval);
 	}
+	if (!ret)
+		posix_cpu_timer_kick_nohz();
 	return ret;
 }
 
@@ -1008,21 +1058,6 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
 	}
 }
 
-/**
- * task_cputime_zero - Check a task_cputime struct for all zero fields.
- *
- * @cputime: The struct to compare.
- *
- * Checks @cputime to see if all fields are zero. Returns true if all fields
- * are zero, false if any field is nonzero.
- */
-static inline int task_cputime_zero(const struct task_cputime *cputime)
-{
-	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
-		return 1;
-	return 0;
-}
-
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list. Per-thread timers
@@ -1336,6 +1371,13 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 		cpu_timer_fire(timer);
 		spin_unlock(&timer->it_lock);
 	}
+
+	/*
+	 * In case some timers were rescheduled after the queue got emptied,
+	 * wake up full dynticks CPUs.
+	 */
+	if (tsk->signal->cputimer.running)
+		posix_cpu_timer_kick_nohz();
 }
 
 /*
@@ -1366,7 +1408,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 		}
 
 		if (!*newval)
-			return;
+			goto out;
 		*newval += now.cpu;
 	}
 
@@ -1384,6 +1426,8 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 			tsk->signal->cputime_expires.virt_exp = *newval;
 		break;
 	}
+out:
+	posix_cpu_timer_kick_nohz();
 }
 
 static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
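
Why posix_cpu_timer_kick_nohz() bounces through a workqueue: posix_cpu_timer_set() and set_process_cpu_timer() run with the timer locked and irqs disabled, while tick_nohz_full_kick_all() sends IPIs via smp_call_function_many(), which must not be called with irqs disabled. From userspace, this path is exercised by arming any process CPU-time timer; a minimal sketch (build with -lrt):

	#include <signal.h>
	#include <time.h>

	int main(void)
	{
		timer_t id;
		struct sigevent sev = {
			.sigev_notify = SIGEV_SIGNAL,
			.sigev_signo  = SIGALRM,
		};
		struct itimerspec its = { .it_value = { .tv_sec = 1 } };

		if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &id))
			return 1;
		/* Lands in posix_cpu_timer_set(), which now ends with
		 * posix_cpu_timer_kick_nohz() on success. */
		return timer_settime(id, 0, &its, NULL);
	}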
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 99c3f13dd478..f6a792ab4983 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -111,6 +111,7 @@ config NO_HZ_FULL
 	select RCU_USER_QS
 	select RCU_NOCB_CPU
 	select CONTEXT_TRACKING_FORCE
+	select IRQ_WORK
 	help
 	 Adaptively try to shutdown the tick whenever possible, even when
 	 the CPU is running tasks. Typically this requires running a single
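
NO_HZ_FULL now selects IRQ_WORK because the tick-sched.c changes below queue a per-CPU struct irq_work so a full dynticks CPU can kick itself from contexts where a regular IPI cannot be sent. A minimal sketch of that pattern with the irq_work API of this era (names here are illustrative, not from the patch):

	#include <linux/irq_work.h>

	static void my_kick_fn(struct irq_work *work)
	{
		/* Runs in a self-IPI-like context soon after queueing. */
	}

	static struct irq_work my_kick = { .func = my_kick_fn };

	static void kick_self(void)
	{
		irq_work_queue(&my_kick);	/* safe with irqs disabled */
	}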
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a76e09044f9f..884a9f302a06 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -147,6 +147,57 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 static cpumask_var_t nohz_full_mask;
 bool have_nohz_full_mask;
 
+/*
+ * Re-evaluate the need for the tick on the current CPU
+ * and restart it if necessary.
+ */
+static void tick_nohz_full_check(void)
+{
+	/*
+	 * STUB for now, will be filled with the full tick stop/restart
+	 * infrastructure patches
+	 */
+}
+
+static void nohz_full_kick_work_func(struct irq_work *work)
+{
+	tick_nohz_full_check();
+}
+
+static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
+	.func = nohz_full_kick_work_func,
+};
+
+/*
+ * Kick the current CPU if it's full dynticks in order to force it to
+ * re-evaluate its dependency on the tick and restart it if necessary.
+ */
+void tick_nohz_full_kick(void)
+{
+	if (tick_nohz_full_cpu(smp_processor_id()))
+		irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+}
+
+static void nohz_full_kick_ipi(void *info)
+{
+	tick_nohz_full_check();
+}
+
+/*
+ * Kick all full dynticks CPUs in order to force these to re-evaluate
+ * their dependency on the tick and restart it if necessary.
+ */
+void tick_nohz_full_kick_all(void)
+{
+	if (!have_nohz_full_mask)
+		return;
+
+	preempt_disable();
+	smp_call_function_many(nohz_full_mask,
+			       nohz_full_kick_ipi, NULL, false);
+	preempt_enable();
+}
+
 int tick_nohz_full_cpu(int cpu)
 {
 	if (!have_nohz_full_mask)
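
End to end, arming a posix cpu timer on a full dynticks system now flows as follows (tick_nohz_full_check() remains a stub until the tick stop/restart infrastructure lands):

	timer_settime()                          syscall; timer locked, irqs off
	  posix_cpu_timer_set()
	    posix_cpu_timer_kick_nohz()
	      schedule_work(&nohz_kick_work)     defer to process context
	        nohz_kick_work_fn()
	          tick_nohz_full_kick_all()
	            smp_call_function_many()     IPI every full dynticks CPU
	              nohz_full_kick_ipi()
	                tick_nohz_full_check()   re-evaluate / restart the tick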