Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 36 +++++++++++++-----------------------
 1 file changed, 13 insertions(+), 23 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 29eb227e33f7..0333abdda85e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3156,28 +3156,23 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 
 /*
- * This is called on clock ticks and on context switches.
- * Bank in p->sched_time the ns elapsed since the last tick or switch.
+ * Return p->sum_exec_runtime plus any more ns on the sched_clock
+ * that have not yet been banked in case the task is currently running.
  */
-static inline void
-update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now)
+unsigned long long task_sched_runtime(struct task_struct *p)
 {
-	p->sched_time += now - p->last_ran;
-	p->last_ran = rq->most_recent_timestamp = now;
-}
-
-/*
- * Return current->sched_time plus any more ns on the sched_clock
- * that have not yet been banked.
- */
-unsigned long long current_sched_time(const struct task_struct *p)
-{
-	unsigned long long ns;
 	unsigned long flags;
+	u64 ns, delta_exec;
+	struct rq *rq;
 
-	local_irq_save(flags);
-	ns = p->sched_time + sched_clock() - p->last_ran;
-	local_irq_restore(flags);
+	rq = task_rq_lock(p, &flags);
+	ns = p->se.sum_exec_runtime;
+	if (rq->curr == p) {
+		delta_exec = rq_clock(rq) - p->se.exec_start;
+		if ((s64)delta_exec > 0)
+			ns += delta_exec;
+	}
+	task_rq_unlock(rq, &flags);
 
 	return ns;
 }
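The new task_sched_runtime() takes the task's runqueue lock, reads the banked p->se.sum_exec_runtime, and, if the task is currently on a CPU, adds the not-yet-banked delta since p->se.exec_start. A minimal caller sketch, not part of this patch (the helper name sample_thread_ns() is hypothetical), showing how a nanosecond CPU-time sample might be taken with it:

/*
 * Sketch only: sample one task's CPU time in ns via the new helper.
 * task_sched_runtime() takes the task's runqueue lock itself, so the
 * caller just needs a valid reference to the task; no irq-save dance
 * is needed here, unlike the old current_sched_time().
 */
static u64 sample_thread_ns(struct task_struct *p)
{
	return task_sched_runtime(p);
}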
@@ -3360,14 +3355,11 @@ out_unlock:
  */
 void scheduler_tick(void)
 {
-	unsigned long long now = sched_clock();
 	struct task_struct *p = current;
 	int cpu = smp_processor_id();
 	int idle_at_tick = idle_cpu(cpu);
 	struct rq *rq = cpu_rq(cpu);
 
-	update_cpu_clock(p, rq, now);
-
 	if (!idle_at_tick)
 		task_running_tick(rq, p);
 #ifdef CONFIG_SMP
@@ -3550,8 +3542,6 @@ switch_tasks:
 	clear_tsk_need_resched(prev);
 	rcu_qsctr_inc(task_cpu(prev));
 
-	update_cpu_clock(prev, rq, now);
-
 	prev->sleep_avg -= run_time;
 	if ((long)prev->sleep_avg <= 0)
 		prev->sleep_avg = 0;
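With update_cpu_clock() removed from scheduler_tick() and from the context-switch path, nothing banks elapsed time into p->sched_time any more; the value returned by task_sched_runtime() is assumed to come from the per-entity p->se.sum_exec_runtime counter that the scheduler core keeps up to date whenever the running task's clock is refreshed. A rough sketch of that accumulation, using the field names read by task_sched_runtime() above (the function name bank_exec_runtime() is illustrative, not from this diff):

/*
 * Sketch only: roughly how sum_exec_runtime is assumed to be kept
 * current for the running entity.  rq_clock(rq) would supply 'now',
 * and exec_start/sum_exec_runtime are the fields task_sched_runtime()
 * reads under the runqueue lock.
 */
static void bank_exec_runtime(struct sched_entity *se, u64 now)
{
	u64 delta_exec = now - se->exec_start;	/* ns run since last update */

	se->sum_exec_runtime += delta_exec;	/* total read by task_sched_runtime() */
	se->exec_start = now;			/* start of the next window */
}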