path: root/kernel/exit.c
author     Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>    2009-12-02 03:28:07 -0500
committer  Ingo Molnar <mingo@elte.hu>    2009-12-02 11:32:40 -0500
commit     0cf55e1ec08bb5a22e068309e2d8ba1180ab4239
tree       6102662a9594d51155bee11666fe8517fcbe6039    /kernel/exit.c
parent     d99ca3b977fc5a93141304f571475c2af9e6c1c5
sched, cputime: Introduce thread_group_times()
This is a real fix for the problem of utime/stime values decreasing, described in this thread:

  http://lkml.org/lkml/2009/11/3/522

Currently cputime is accounted in the following way:

- {u,s}time in task_struct are increased every time the thread is interrupted by a tick (timer interrupt).

- When a thread exits, its {u,s}time are added to signal->{u,s}time, after being adjusted by task_times().

- When all threads in a thread group have exited, the accumulated {u,s}time (and also c{u,s}time) in the signal struct are added to the c{u,s}time in the signal struct of the group's parent.

So {u,s}time in the task struct are "raw" tick counts, while {u,s}time and c{u,s}time in the signal struct are "adjusted" values.

The accounted values are used by:

- task_times(), to get the cputime of a thread: this function returns adjusted values that originate from the raw {u,s}time, scaled by the sum_exec_runtime accounted by CFS.

- thread_group_cputime(), to get the cputime of a thread group: this function returns the sum of the {u,s}time of all living threads in the group, plus the {u,s}time in the signal struct, which is the sum of the adjusted cputimes of all exited threads that belonged to the group.

The problem is the return value of thread_group_cputime(), because it is a mixed sum of "raw" and "adjusted" values:

  group's {u,s}time = foreach(thread){{u,s}time} + exited({u,s}time)

This misbehavior can break {u,s}time monotonicity. If a thread's raw values are greater than its adjusted values (e.g. it was interrupted by 1000Hz ticks 50 times but only ran for 45ms) and that thread exits, the group cputime will decrease (e.g. by 5ms); a toy illustration of this follows the sign-off tags below.

To fix this, we could do:

  group's {u,s}time = foreach(t){task_times(t)} + exited({u,s}time)

But task_times() contains hard divisions, so applying it to every thread should be avoided.

This patch fixes the above problem in the following way:

- Modify thread exit (= __exit_signal()) not to use task_times(), so that {u,s}time in the signal struct accumulate raw values instead of adjusted values. As a result, thread_group_cputime() returns a pure sum of "raw" values.

- Introduce a new function thread_group_times(*task, *utime, *stime) that converts the "raw" values of thread_group_cputime() into "adjusted" values, using the same calculation procedure as task_times(). (A rough sketch of this helper appears below, just before the diff.)

- Modify group exit (= wait_task_zombie()) to use the newly introduced thread_group_times(), so that c{u,s}time in the signal struct hold adjusted values, as they did before this patch.

- Replace some thread_group_cputime() calls with thread_group_times(). These replacements are only applied where "adjusted" cputime is conveyed to users and where task_times() is already used nearby (i.e. sys_times(), getrusage(), and /proc/<PID>/stat).

This patch has a positive side effect:

- Before this patch, if a group contained many short-lived threads (e.g. each running 0.9ms and never interrupted by a tick), the group's cputime could be invisible, since each thread's cputime was accumulated after adjustment: imagining the adjustment function as adj(ticks, runtime), {adj(0, 0.9) + adj(0, 0.9) + ....} = {0 + 0 + ....} = 0. After this patch this no longer happens, because the adjustment is applied after accumulation.

v2:
- remove if()s, put new variables into signal_struct.
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Spencer Candland <spencer@bluehost.com>
Cc: Americo Wang <xiyou.wangcong@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
LKML-Reference: <4B162517.8040909@jp.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
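The monotonicity break described in the message is easiest to see with concrete numbers. The toy program below is purely illustrative (it is not kernel code; the values are the example figures from the message): a thread accumulates 50 raw ticks at 1000Hz but only 45ms of sum_exec_runtime, so the pre-patch group sum drops by 5ms the moment the thread exits and its adjusted value replaces the raw one.

```c
#include <stdio.h>

/*
 * Toy illustration only, not kernel code: why the pre-patch
 * thread_group_cputime() sum could go backwards when a thread exits.
 *
 * Pre-patch rule:
 *   - a living thread contributes its raw tick-based time;
 *   - an exited thread contributes its task_times()-adjusted time,
 *     which __exit_signal() folded into signal->{u,s}time.
 */
int main(void)
{
	const long tick_ms = 1;			/* 1000Hz -> 1ms per tick */
	const long ticks   = 50;		/* thread was hit by 50 ticks */
	const long raw_ms  = ticks * tick_ms;	/* "raw" cputime: 50ms */
	const long adj_ms  = 45;		/* scaled by sum_exec_runtime: 45ms */

	long group_before = raw_ms;	/* thread alive: raw value in the sum */
	long group_after  = adj_ms;	/* thread exited: adjusted value in the sum */

	printf("group cputime before exit: %ldms\n", group_before);		/* 50ms */
	printf("group cputime after exit:  %ldms\n", group_after);		/* 45ms */
	printf("delta:                     %ldms\n", group_after - group_before); /* -5ms */
	return 0;
}
```

After this patch the exiting thread's raw 50ms stays in the group sum, and the adjustment is applied once to the accumulated total, so the reported value can only grow.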
Diffstat (limited to 'kernel/exit.c')
-rw-r--r--    kernel/exit.c    23
1 file changed, 12 insertions, 11 deletions
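The new thread_group_times() helper itself is added in the kernel/sched.c part of this patch and is not visible in the kernel/exit.c hunks below. The sketch here is only an assumption of its shape, built from the description above (same scaling as task_times(), with the v2 note that the new prev_utime/prev_stime variables live in signal_struct); helpers such as nsecs_to_cputime() are taken from the surrounding sched.c code and may differ from the actual patch:

```c
/*
 * Sketch only: the real implementation lives in kernel/sched.c and is
 * not part of the diff shown on this page.  It converts the "raw" sum
 * returned by thread_group_cputime() into "adjusted" values, using the
 * same procedure as task_times().
 */
void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct signal_struct *sig = p->signal;
	struct task_cputime cputime;
	cputime_t rtime, utime, total;

	/* Pure sum of raw tick-based times for the whole group. */
	thread_group_cputime(p, &cputime);

	total = cputime_add(cputime.utime, cputime.stime);
	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);	/* sched.c-local helper */

	if (total) {
		u64 temp = (u64)(rtime * cputime.utime);

		do_div(temp, total);		/* scale utime by the runtime/ticks ratio */
		utime = (cputime_t)temp;
	} else {
		utime = rtime;
	}

	/*
	 * Per the v2 note, the previously reported values are kept in
	 * signal_struct so the group times never decrease.
	 */
	sig->prev_utime = max(sig->prev_utime, utime);
	sig->prev_stime = max(sig->prev_stime,
			      cputime_sub(rtime, sig->prev_utime));

	*ut = sig->prev_utime;
	*st = sig->prev_stime;
}
```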
diff --git a/kernel/exit.c b/kernel/exit.c
index 2eaf68b634e3..b221ad65fd20 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -91,8 +91,6 @@ static void __exit_signal(struct task_struct *tsk)
 	if (atomic_dec_and_test(&sig->count))
 		posix_cpu_timers_exit_group(tsk);
 	else {
-		cputime_t utime, stime;
-
 		/*
 		 * If there is any task waiting for the group exit
 		 * then notify it:
@@ -112,9 +110,8 @@ static void __exit_signal(struct task_struct *tsk)
 		 * We won't ever get here for the group leader, since it
 		 * will have been the last reference on the signal_struct.
 		 */
-		task_times(tsk, &utime, &stime);
-		sig->utime = cputime_add(sig->utime, utime);
-		sig->stime = cputime_add(sig->stime, stime);
+		sig->utime = cputime_add(sig->utime, tsk->utime);
+		sig->stime = cputime_add(sig->stime, tsk->stime);
 		sig->gtime = cputime_add(sig->gtime, tsk->gtime);
 		sig->min_flt += tsk->min_flt;
 		sig->maj_flt += tsk->maj_flt;
@@ -1208,6 +1205,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 		struct signal_struct *psig;
 		struct signal_struct *sig;
 		unsigned long maxrss;
+		cputime_t tgutime, tgstime;
 
 		/*
 		 * The resource counters for the group leader are in its
@@ -1223,20 +1221,23 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 		 * need to protect the access to parent->signal fields,
 		 * as other threads in the parent group can be right
 		 * here reaping other children at the same time.
+		 *
+		 * We use thread_group_times() to get times for the thread
+		 * group, which consolidates times for all threads in the
+		 * group including the group leader.
 		 */
+		thread_group_times(p, &tgutime, &tgstime);
 		spin_lock_irq(&p->real_parent->sighand->siglock);
 		psig = p->real_parent->signal;
 		sig = p->signal;
 		psig->cutime =
 			cputime_add(psig->cutime,
-			cputime_add(p->utime,
-			cputime_add(sig->utime,
-				    sig->cutime)));
+			cputime_add(tgutime,
+				    sig->cutime));
 		psig->cstime =
 			cputime_add(psig->cstime,
-			cputime_add(p->stime,
-			cputime_add(sig->stime,
-				    sig->cstime)));
+			cputime_add(tgstime,
+				    sig->cstime));
 		psig->cgtime =
 			cputime_add(psig->cgtime,
 			cputime_add(p->gtime,