path: root/kernel/posix-cpu-timers.c
author		Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>	2009-03-31 03:56:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-01 10:44:16 -0400
commit		c5f8d99585d7b5b7e857fabf8aefd0174903a98c (patch)
tree		504127a7d4b6a0e3aee56d0822e5e8b23f8062d7 /kernel/posix-cpu-timers.c
parent		13b8bd0a5713bdf05659019badd7c0407984ece1 (diff)
posixtimers, sched: Fix posix clock monotonicity
Impact: regression fix (clock_gettime() could go backward)

This patch re-introduces a couple of functions, task_sched_runtime()
and thread_group_sched_runtime(), which were removed at the time of
2.6.28-rc1.

These functions protect the sampling of the thread/process clock with
the rq lock. The rq lock is required so that rq->clock is not updated
during the sampling; otherwise clock_gettime() may return
((accounted runtime before update) + (delta after update)), which is
less than what it should be.

v2 -> v3:
	- Rename the static helper __task_delta_exec() to
	  do_task_delta_exec(), since the -tip tree already has a
	  __task_delta_exec() of a different version.

v1 -> v2:
	- Revise function comments and the patch description.
	- Add a note about the accuracy of the thread group's runtime.

Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: stable@kernel.org [2.6.28.x][2.6.29.x]
LKML-Reference: <49D1CC93.4080401@jp.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
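For reference, a minimal sketch of the two re-introduced helpers, based only on
the description above: the real bodies live in kernel/sched.c and are not part
of the diff shown below, and do_task_delta_exec() is the renamed helper from
the v2 -> v3 note, assumed here to take the task and its runqueue.

/*
 * Sketch only: sample a task's CPUCLOCK_SCHED value while holding the
 * runqueue lock, so rq->clock cannot be updated in the middle of the
 * sample.
 */
unsigned long long task_sched_runtime(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns;

	rq = task_rq_lock(p, &flags);		/* pin p's runqueue */
	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
	task_rq_unlock(rq, &flags);

	return ns;
}

/*
 * Sketch only: same idea for the whole thread group.  The group total
 * is only as accurate as thread_group_cputime() itself (see the note
 * about accuracy in the changelog above).
 */
unsigned long long thread_group_sched_runtime(struct task_struct *p)
{
	struct task_cputime totals;
	unsigned long flags;
	struct rq *rq;
	u64 ns;

	rq = task_rq_lock(p, &flags);
	thread_group_cputime(p, &totals);
	ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
	task_rq_unlock(rq, &flags);

	return ns;
}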
Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r--	kernel/posix-cpu-timers.c	|  7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index fa07da94d7be..4318c3085788 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -224,7 +224,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
 		cpu->cpu = virt_ticks(p);
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
+		cpu->sched = task_sched_runtime(p);
 		break;
 	}
 	return 0;
@@ -240,18 +240,19 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 {
 	struct task_cputime cputime;
 
-	thread_group_cputime(p, &cputime);
 	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
+		thread_group_cputime(p, &cputime);
 		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
 		break;
 	case CPUCLOCK_VIRT:
+		thread_group_cputime(p, &cputime);
 		cpu->cpu = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+		cpu->sched = thread_group_sched_runtime(p);
 		break;
 	}
 	return 0;
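As a usage illustration (not part of this commit), the monotonicity being
restored can be checked from user space with clock_gettime() on the process
CPU clock; the program below uses only the standard POSIX API (link with -lrt
on older glibc).

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec prev = {0, 0}, now;
	long long p, n;
	int i;

	for (i = 0; i < 1000000; i++) {
		if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &now))
			return 1;
		p = (long long)prev.tv_sec * 1000000000LL + prev.tv_nsec;
		n = (long long)now.tv_sec * 1000000000LL + now.tv_nsec;
		if (n < p) {
			/* a backward jump is exactly the bug this patch fixes */
			printf("clock went backward: %lld -> %lld ns\n", p, n);
			return 1;
		}
		prev = now;
	}
	printf("no backward jumps observed\n");
	return 0;
}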