about · summary · refs · log · tree · commit · diff · stats
path: root/kernel
diff options
context:
space:
mode:
authorHidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>2009-11-26 00:48:30 -0500
committerIngo Molnar <mingo@elte.hu>2009-11-26 06:59:19 -0500
commitd180c5bccec02612256fd8076ff3c1fac3429553 (patch)
tree1ef4a45c81531645640380965916c68bbe7f6abb /kernel
parent16bc67edeb49b531940b2ba6c183780a1b5c472d (diff)
sched: Introduce task_times() to replace task_{u,s}time() pair
Functions task_{u,s}time() are called as a pair in almost all cases. However, task_stime() is implemented to call task_utime() internally, so such paired calls run task_utime() twice. This means we do the heavy divisions (div_u64 + do_div) twice to obtain utime and stime, which could be obtained at the same time with one set of divisions. This patch introduces a function task_times(*tsk, *utime, *stime) to retrieve utime and stime at once, in a better, optimized way.

Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Spencer Candland <spencer@bluehost.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Americo Wang <xiyou.wangcong@gmail.com>
LKML-Reference: <4B0E16AE.906@jp.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/exit.c7
-rw-r--r--kernel/sched.c55
-rw-r--r--kernel/sys.c3
3 files changed, 41 insertions, 24 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index f7864ac2ecc1..29068ab2670a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -91,6 +91,8 @@ static void __exit_signal(struct task_struct *tsk)
 		if (atomic_dec_and_test(&sig->count))
 			posix_cpu_timers_exit_group(tsk);
 		else {
+			cputime_t utime, stime;
+
 			/*
 			 * If there is any task waiting for the group exit
 			 * then notify it:
@@ -110,8 +112,9 @@ static void __exit_signal(struct task_struct *tsk)
 		 * We won't ever get here for the group leader, since it
 		 * will have been the last reference on the signal_struct.
 		 */
-		sig->utime = cputime_add(sig->utime, task_utime(tsk));
-		sig->stime = cputime_add(sig->stime, task_stime(tsk));
+		task_times(tsk, &utime, &stime);
+		sig->utime = cputime_add(sig->utime, utime);
+		sig->stime = cputime_add(sig->stime, stime);
 		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
 		sig->min_flt += tsk->min_flt;
 		sig->maj_flt += tsk->maj_flt;
diff --git a/kernel/sched.c b/kernel/sched.c
index 315ba4059f93..475a6f2b7158 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5191,6 +5191,14 @@ cputime_t task_stime(struct task_struct *p)
 {
 	return p->stime;
 }
+
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+	if (ut)
+		*ut = task_utime(p);
+	if (st)
+		*st = task_stime(p);
+}
 #else
 
 #ifndef nsecs_to_cputime
@@ -5198,41 +5206,48 @@ cputime_t task_stime(struct task_struct *p)
 	msecs_to_cputime(div_u64((__nsecs), NSEC_PER_MSEC))
 #endif
 
-cputime_t task_utime(struct task_struct *p)
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	cputime_t utime = p->utime, total = utime + p->stime;
-	u64 temp;
+	cputime_t rtime, utime = p->utime, total = utime + p->stime;
 
 	/*
 	 * Use CFS's precise accounting:
 	 */
-	temp = (u64)nsecs_to_cputime(p->se.sum_exec_runtime);
+	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
 	if (total) {
-		temp *= utime;
+		u64 temp;
+
+		temp = (u64)(rtime * utime);
 		do_div(temp, total);
-	}
-	utime = (cputime_t)temp;
+		utime = (cputime_t)temp;
+	} else
+		utime = rtime;
 
+	/*
+	 * Compare with previous values, to keep monotonicity:
+	 */
 	p->prev_utime = max(p->prev_utime, utime);
-	return p->prev_utime;
+	p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
+
+	if (ut)
+		*ut = p->prev_utime;
+	if (st)
+		*st = p->prev_stime;
+}
+
+cputime_t task_utime(struct task_struct *p)
+{
+	cputime_t utime;
+	task_times(p, &utime, NULL);
+	return utime;
 }
 
 cputime_t task_stime(struct task_struct *p)
 {
 	cputime_t stime;
-
-	/*
-	 * Use CFS's precise accounting. (we subtract utime from
-	 * the total, to make sure the total observed by userspace
-	 * grows monotonically - apps rely on that):
-	 */
-	stime = nsecs_to_cputime(p->se.sum_exec_runtime) - task_utime(p);
-
-	if (stime >= 0)
-		p->prev_stime = max(p->prev_stime, stime);
-
-	return p->prev_stime;
+	task_times(p, NULL, &stime);
+	return stime;
 }
 #endif
 
diff --git a/kernel/sys.c b/kernel/sys.c
index ce17760d9c51..bbdfce0d4347 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1346,8 +1346,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 	utime = stime = cputime_zero;
 
 	if (who == RUSAGE_THREAD) {
-		utime = task_utime(current);
-		stime = task_stime(current);
+		task_times(current, &utime, &stime);
 		accumulate_thread_rusage(p, r);
 		maxrss = p->signal->maxrss;
 		goto out;