Diffstat (limited to 'kernel')

 kernel/exit.c            |  6
 kernel/sched.c           | 59
 kernel/time/tick-sched.c |  3
 3 files changed, 65 insertions(+), 3 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 25ed2ad986df..16395644a98f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -112,9 +112,9 @@ static void __exit_signal(struct task_struct *tsk)
 		 * We won't ever get here for the group leader, since it
 		 * will have been the last reference on the signal_struct.
 		 */
-		sig->utime = cputime_add(sig->utime, tsk->utime);
-		sig->stime = cputime_add(sig->stime, tsk->stime);
-		sig->gtime = cputime_add(sig->gtime, tsk->gtime);
+		sig->utime = cputime_add(sig->utime, task_utime(tsk));
+		sig->stime = cputime_add(sig->stime, task_stime(tsk));
+		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
 		sig->min_flt += tsk->min_flt;
 		sig->maj_flt += tsk->maj_flt;
 		sig->nvcsw += tsk->nvcsw;
diff --git a/kernel/sched.c b/kernel/sched.c
index 9a1ddb84e26d..1a5f73c1fcdc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4179,6 +4179,65 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 }
 
 /*
+ * Use precise platform statistics if available:
+ */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+cputime_t task_utime(struct task_struct *p)
+{
+	return p->utime;
+}
+
+cputime_t task_stime(struct task_struct *p)
+{
+	return p->stime;
+}
+#else
+cputime_t task_utime(struct task_struct *p)
+{
+	clock_t utime = cputime_to_clock_t(p->utime),
+		total = utime + cputime_to_clock_t(p->stime);
+	u64 temp;
+
+	/*
+	 * Use CFS's precise accounting:
+	 */
+	temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
+
+	if (total) {
+		temp *= utime;
+		do_div(temp, total);
+	}
+	utime = (clock_t)temp;
+
+	p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
+	return p->prev_utime;
+}
+
+cputime_t task_stime(struct task_struct *p)
+{
+	clock_t stime;
+
+	/*
+	 * Use CFS's precise accounting. (we subtract utime from
+	 * the total, to make sure the total observed by userspace
+	 * grows monotonically - apps rely on that):
+	 */
+	stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
+			cputime_to_clock_t(task_utime(p));
+
+	if (stime >= 0)
+		p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
+
+	return p->prev_stime;
+}
+#endif
+
+inline cputime_t task_gtime(struct task_struct *p)
+{
+	return p->gtime;
+}
+
+/*
  * This function gets called by the timer code, with HZ frequency.
  * We call it with interrupts disabled.
  *
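In the !CONFIG_VIRT_CPU_ACCOUNTING case above, task_utime() splits the precise CFS runtime (p->se.sum_exec_runtime) in the same utime:stime ratio that the tick-based sampling observed, then clamps the result against prev_utime so the value reported to userspace never moves backwards; task_stime() is then derived as the remainder, which keeps the utime + stime total monotonic as well. A minimal user-space sketch of that arithmetic follows (hypothetical names; the cputime_t/clock_t conversions are elided and the runtime is assumed to be in tick units already):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's per-task counters. */
struct task_sample {
	uint64_t utime_ticks;        /* tick-sampled user time                  */
	uint64_t stime_ticks;        /* tick-sampled system time                */
	uint64_t sum_exec_runtime;   /* precise CFS runtime, already in ticks   */
	uint64_t prev_utime;         /* last user time reported to userspace    */
};

/*
 * Split the precise runtime in the utime:stime ratio seen by the tick
 * sampling, and never report less than was reported before.
 */
static uint64_t sample_task_utime(struct task_sample *p)
{
	uint64_t total = p->utime_ticks + p->stime_ticks;
	uint64_t utime = p->sum_exec_runtime;

	if (total)
		utime = utime * p->utime_ticks / total;

	if (utime > p->prev_utime)
		p->prev_utime = utime;
	return p->prev_utime;
}

int main(void)
{
	/* 3 user ticks vs. 1 system tick, 100 ticks of precise runtime. */
	struct task_sample t = { 3, 1, 100, 0 };

	printf("utime = %llu\n", (unsigned long long)sample_task_utime(&t));
	return 0;
}

With 3 user ticks against 1 system tick and 100 ticks of precise runtime, the sketch reports 75 ticks of user time, and any later call can only report 75 or more.
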
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 7a46bde78c66..a87b0468568b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -162,6 +162,8 @@ void tick_nohz_stop_idle(int cpu)
 		ts->idle_lastupdate = now;
 		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
 		ts->idle_active = 0;
+
+		sched_clock_idle_wakeup_event(0);
 	}
 }
 
@@ -177,6 +179,7 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 	}
 	ts->idle_entrytime = now;
 	ts->idle_active = 1;
+	sched_clock_idle_sleep_event();
 	return now;
 }
 
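The two added calls bracket the nohz idle window: sched_clock_idle_sleep_event() is issued right after the entry timestamp is recorded in tick_nohz_start_idle(), and sched_clock_idle_wakeup_event(0) right after the slept time has been folded into idle_sleeptime in tick_nohz_stop_idle(), so the scheduler clock is notified at both ends of the idle period. A rough user-space sketch of that bracketing pattern, with the two hooks reduced to hypothetical stubs:

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-ins for sched_clock_idle_{sleep,wakeup}_event(). */
static void idle_sleep_event(void)  { puts("clock: CPU going idle"); }
static void idle_wakeup_event(void) { puts("clock: CPU woke up"); }

struct idle_stats {
	struct timespec entrytime;   /* when the CPU last went idle */
	long long sleeptime_ns;      /* accumulated idle time       */
	int active;                  /* currently idle?             */
};

static long long delta_ns(struct timespec a, struct timespec b)
{
	return (b.tv_sec - a.tv_sec) * 1000000000LL + (b.tv_nsec - a.tv_nsec);
}

/* Mirrors tick_nohz_start_idle(): record entry time, then notify the clock. */
static void start_idle(struct idle_stats *ts)
{
	clock_gettime(CLOCK_MONOTONIC, &ts->entrytime);
	ts->active = 1;
	idle_sleep_event();
}

/* Mirrors tick_nohz_stop_idle(): account the idle time, then notify the clock. */
static void stop_idle(struct idle_stats *ts)
{
	struct timespec now;

	if (!ts->active)
		return;
	clock_gettime(CLOCK_MONOTONIC, &now);
	ts->sleeptime_ns += delta_ns(ts->entrytime, now);
	ts->active = 0;
	idle_wakeup_event();
}

int main(void)
{
	struct idle_stats ts = { 0 };
	struct timespec nap = { .tv_nsec = 1000000 };   /* pretend to idle ~1 ms */

	start_idle(&ts);
	nanosleep(&nap, NULL);
	stop_idle(&ts);
	printf("total idle: %lld ns\n", ts.sleeptime_ns);
	return 0;
}
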
