path: root/kernel/sched.c
author    Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>    2009-11-11 23:33:45 -0500
committer Ingo Molnar <mingo@elte.hu>    2009-11-12 09:23:47 -0500
commit    761b1d26df542fd5eb348837351e4d2f3bc7bffe (patch)
tree      3c548070fdf81b618d32f9878b41fb16d26ffcde /kernel/sched.c
parent    ffd44db5f02af32bcc25a8eb5981bf02a141cdab (diff)
sched: Fix granularity of task_u/stime()
Originally task_s/utime() were designed to return clock_t, but were later changed to return cputime_t by the following commit:

  commit efe567fc8281661524ffa75477a7c4ca9b466c63
  Author: Christian Borntraeger <borntraeger@de.ibm.com>
  Date:   Thu Aug 23 15:18:02 2007 +0200

That commit only changed the type of the return value, not the implementation. As a result, the granularity of task_s/utime() is still that of clock_t, not that of cputime_t. So using task_s/utime() in __exit_signal() causes the values accumulated into the signal struct to be rounded and coarse grained.

This patch removes the casts to clock_t in task_u/stime(), keeping cputime_t granularity throughout the calculation.

v2: Use div_u64() to avoid the error "undefined reference to `__udivdi3`" on some 32-bit systems.

Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: xiyou.wangcong@gmail.com
Cc: Spencer Candland <spencer@bluehost.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
LKML-Reference: <4AFB9029.9000208@jp.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
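To make the rounding concrete, here is a small userspace sketch (not kernel code; HZ=1000 and USER_HZ=100 are assumed values, and the helpers below only approximate nsec_to_clock_t() and msecs_to_cputime() for a jiffies-based cputime_t): the old path rounds the runtime down to 10 ms clock_t ticks before converting back, while the new path keeps 1 ms cputime_t granularity.

/* Illustrative userspace sketch only; HZ and USER_HZ are assumptions. */
#include <stdio.h>
#include <stdint.h>

#define HZ             1000ULL        /* assumed cputime_t granularity: 1 ms  */
#define USER_HZ         100ULL        /* assumed clock_t granularity:  10 ms  */
#define NSEC_PER_SEC   1000000000ULL
#define NSEC_PER_MSEC     1000000ULL

int main(void)
{
        uint64_t runtime_ns = 1234567890ULL;   /* ~1.234 s of CPU time */

        /* old path: nanoseconds -> clock_t -> cputime_t */
        uint64_t as_clock_t  = runtime_ns / (NSEC_PER_SEC / USER_HZ);
        uint64_t old_cputime = as_clock_t * (HZ / USER_HZ);

        /* new path: nanoseconds -> cputime_t directly */
        uint64_t new_cputime = runtime_ns / NSEC_PER_MSEC;

        /* prints "old: 1230 jiffies, new: 1234 jiffies" */
        printf("old: %llu jiffies, new: %llu jiffies\n",
               (unsigned long long)old_cputime,
               (unsigned long long)new_cputime);
        return 0;
}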
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 22
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 43e61fa04dc7..ab9a034c4a17 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5156,41 +5156,45 @@ cputime_t task_stime(struct task_struct *p)
 	return p->stime;
 }
 #else
+
+#ifndef nsecs_to_cputime
+# define nsecs_to_cputime(__nsecs) \
+	msecs_to_cputime(div_u64((__nsecs), NSEC_PER_MSEC))
+#endif
+
 cputime_t task_utime(struct task_struct *p)
 {
-	clock_t utime = cputime_to_clock_t(p->utime),
-		total = utime + cputime_to_clock_t(p->stime);
+	cputime_t utime = p->utime, total = utime + p->stime;
 	u64 temp;
 
 	/*
 	 * Use CFS's precise accounting:
 	 */
-	temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
+	temp = (u64)nsecs_to_cputime(p->se.sum_exec_runtime);
 
 	if (total) {
 		temp *= utime;
 		do_div(temp, total);
 	}
-	utime = (clock_t)temp;
+	utime = (cputime_t)temp;
 
-	p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
+	p->prev_utime = max(p->prev_utime, utime);
 	return p->prev_utime;
 }
 
 cputime_t task_stime(struct task_struct *p)
 {
-	clock_t stime;
+	cputime_t stime;
 
 	/*
 	 * Use CFS's precise accounting. (we subtract utime from
 	 * the total, to make sure the total observed by userspace
 	 * grows monotonically - apps rely on that):
 	 */
-	stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
-			cputime_to_clock_t(task_utime(p));
+	stime = nsecs_to_cputime(p->se.sum_exec_runtime) - task_utime(p);
 
 	if (stime >= 0)
-		p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
+		p->prev_stime = max(p->prev_stime, stime);
 
 	return p->prev_stime;
 }
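A note on the new nsecs_to_cputime() helper above: on 32-bit builds, dividing a u64 with the plain C '/' operator can generate a call to libgcc's __udivdi3, which the kernel does not provide, hence the v2 change to div_u64(). A minimal sketch of the pattern, assuming only standard kernel headers (the helper name here is illustrative, not part of the patch):

#include <linux/math64.h>	/* div_u64(): u64 dividend, u32 divisor */
#include <linux/time.h>		/* NSEC_PER_MSEC */

/* Illustrative only: convert nanoseconds to milliseconds in kernel code. */
static inline u64 ns_to_ms_sketch(u64 nsecs)
{
	/* return nsecs / NSEC_PER_MSEC;   <- may emit __udivdi3 on 32-bit */
	return div_u64(nsecs, NSEC_PER_MSEC);	/* kernel 64-by-32 divide, no libgcc call */
}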