author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-02-12 09:00:52 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-13 07:04:05 -0500
commit		3997ad317fdf9ecdb5702e2b4fd1f8229814ff8c (patch)
tree		be0b1802f65e85157d97acac38f1bd310ba6d626 /kernel
parent		37bed90094fdb1eea6e4afec6a200d4e60143e55 (diff)
timers: more consistently use clock vs timer
While reviewing the manpages, I noticed I'd missed some clock vs timer sites.
Make sure that all timer functions call cpu_timer_sample_group() and not
cpu_clock_sample_group(). This ensures that we enable the process-wide timer
in time, and therefore pay the O(n) thread-group cost from the syscall.

Not doing it here will result in the first jiffy tick after setting the timer
doing this work, resulting in a very expensive tick (but only once) and a
delay in actually starting the timer.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
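
For context, the difference between the two helpers is that the clock variant
does a one-shot walk over every thread in the group on each call, while the
timer variant arms the shared per-process cputimer, so the O(n) walk is paid
once, inside the timer_settime() syscall, and later ticks only do cheap
incremental accounting. The sketch below is illustrative only, paraphrased
from this era of the tree rather than copied verbatim; cpu_clock_sample_group()
itself does not appear in the hunks of this patch.

	/* Illustrative sketch, not verbatim kernel source. */

	/* Clock path: one-shot O(n) sum over all threads, nothing
	 * persistent is switched on. Fine for clock_gettime(). */
	static int cpu_clock_sample_group(const clockid_t which_clock,
					  struct task_struct *p,
					  union cpu_time_count *cpu)
	{
		struct task_cputime cputime;

		thread_group_cputime(p, &cputime);	/* walks all threads */
		/* ... same switch on CPUCLOCK_WHICH(which_clock) as in
		 * cpu_timer_sample_group() in the diff below ... */
		return 0;
	}

	/* Timer path: thread_group_cputimer() turns on the per-process
	 * cputimer accounting the first time it is used, so the O(n)
	 * walk happens here, at timer-set time, not at the first tick. */
	static int cpu_timer_sample_group(const clockid_t which_clock,
					  struct task_struct *p,
					  union cpu_time_count *cpu)
	{
		struct task_cputime cputime;

		thread_group_cputimer(p, &cputime);	/* arms the process-wide timer */
		/* ... switch on CPUCLOCK_WHICH(which_clock), see the hunk below ... */
		return 0;
	}

The call sites changed below are exactly the timer paths (settime, get,
schedule); the pure clock-reading paths keep the cheaper clock sampler.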
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/posix-cpu-timers.c	60
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 2313a4cc14ea..e976e505648d 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -681,6 +681,33 @@ static void cpu_timer_fire(struct k_itimer *timer)
 }
 
 /*
+ * Sample a process (thread group) timer for the given group_leader task.
+ * Must be called with tasklist_lock held for reading.
+ */
+static int cpu_timer_sample_group(const clockid_t which_clock,
+				  struct task_struct *p,
+				  union cpu_time_count *cpu)
+{
+	struct task_cputime cputime;
+
+	thread_group_cputimer(p, &cputime);
+	switch (CPUCLOCK_WHICH(which_clock)) {
+	default:
+		return -EINVAL;
+	case CPUCLOCK_PROF:
+		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+		break;
+	case CPUCLOCK_VIRT:
+		cpu->cpu = cputime.utime;
+		break;
+	case CPUCLOCK_SCHED:
+		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+		break;
+	}
+	return 0;
+}
+
+/*
  * Guts of sys_timer_settime for CPU timers.
  * This is called with the timer locked and interrupts disabled.
  * If we return TIMER_RETRY, it's necessary to release the timer's lock
@@ -741,7 +768,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 		cpu_clock_sample(timer->it_clock, p, &val);
 	} else {
-		cpu_clock_sample_group(timer->it_clock, p, &val);
+		cpu_timer_sample_group(timer->it_clock, p, &val);
 	}
 
 	if (old) {
@@ -889,7 +916,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 			read_unlock(&tasklist_lock);
 			goto dead;
 		} else {
-			cpu_clock_sample_group(timer->it_clock, p, &now);
+			cpu_timer_sample_group(timer->it_clock, p, &now);
 			clear_dead = (unlikely(p->exit_state) &&
 				      thread_group_empty(p));
 		}
@@ -1244,7 +1271,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			clear_dead_task(timer, now);
 			goto out_unlock;
 		}
-		cpu_clock_sample_group(timer->it_clock, p, &now);
+		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
 		/* Leave the tasklist_lock locked for the call below. */
 	}
@@ -1409,33 +1436,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 }
 
 /*
- * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
- */
-static int cpu_timer_sample_group(const clockid_t which_clock,
-				  struct task_struct *p,
-				  union cpu_time_count *cpu)
-{
-	struct task_cputime cputime;
-
-	thread_group_cputimer(p, &cputime);
-	switch (CPUCLOCK_WHICH(which_clock)) {
-	default:
-		return -EINVAL;
-	case CPUCLOCK_PROF:
-		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
-		break;
-	case CPUCLOCK_VIRT:
-		cpu->cpu = cputime.utime;
-		break;
-	case CPUCLOCK_SCHED:
-		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
-		break;
-	}
-	return 0;
-}
-
-/*
  * Set one of the process-wide special case CPU timers.
  * The tsk->sighand->siglock must be held by the caller.
  * The *newval argument is relative and we update it to be absolute, *oldval