author		Ingo Molnar <mingo@elte.hu>	2007-07-09 12:51:58 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-07-09 12:51:58 -0400
commit		41b86e9c510ae66639bf29d3201e1d2384a7fde6 (patch)
tree		f340b94f53be08e2fbba2344deb985f9ecb4a036
parent		20d315d42aed95423a7203e1d7e84086004b5a00 (diff)
sched: make posix-cpu-timers use CFS's accounting information

update the posix-cpu-timers code to use CFS's CPU accounting information.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/linux/sched.h		5
-rw-r--r--	kernel/posix-cpu-timers.c	34
-rw-r--r--	kernel/sched.c			36
3 files changed, 33 insertions(+), 42 deletions(-)
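Concretely: the old code banked elapsed nanoseconds into p->sched_time from the tick and context-switch paths, while CFS already maintains a per-task nanosecond total in p->se.sum_exec_runtime. The patch drops the separate bookkeeping and samples CFS's counter instead, adding any not-yet-banked delta for a task that is currently on a CPU. A minimal userspace model of the new sampling logic (the structs below are simplified stand-ins for the kernel's struct rq and struct sched_entity, not real kernel code):

    #include <stdio.h>

    struct sched_entity {
            unsigned long long sum_exec_runtime;    /* ns banked so far */
            unsigned long long exec_start;          /* rq clock at last dispatch */
    };

    struct task {
            struct sched_entity se;
    };

    struct rq {
            unsigned long long clock;               /* stand-in for rq_clock() */
            struct task *curr;
    };

    /* Models the new task_sched_runtime(): banked runtime, plus the
     * in-flight delta when the task is on the CPU right now. */
    static unsigned long long task_sched_runtime(struct rq *rq, struct task *p)
    {
            unsigned long long ns = p->se.sum_exec_runtime;

            if (rq->curr == p) {
                    long long delta_exec = (long long)(rq->clock - p->se.exec_start);
                    if (delta_exec > 0)             /* discard a stale sample */
                            ns += delta_exec;
            }
            return ns;
    }

    int main(void)
    {
            struct task p = { .se = { .sum_exec_runtime = 5000000, .exec_start = 100 } };
            struct rq rq = { .clock = 2100, .curr = &p };

            /* 5000000 ns banked + 2000 ns not yet banked = 5002000 ns */
            printf("%llu ns\n", task_sched_runtime(&rq, &p));
            return 0;
    }

In the real kernel the runqueue clock and exec_start are maintained by CFS itself; here they are just fields set by hand so the arithmetic can be seen in isolation.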
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 995eb407c234..3e7f1890e55d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -482,7 +482,8 @@ struct signal_struct {
 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
 	 * other than jiffies.)
 	 */
-	unsigned long long sched_time;
+	unsigned long sched_time;
+	unsigned long long sum_sched_runtime;
 
 	/*
 	 * We don't bother to synchronize most readers of this at all,
@@ -1308,7 +1309,7 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 extern unsigned long long
-current_sched_time(const struct task_struct *current_task);
+task_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
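Besides adding the group counter, the declaration change alters the accessor's contract: current_sched_time() was only accurate for the calling task, whereas task_sched_runtime() may be called on any task, and it loses the const qualifier because it has to take that task's runqueue lock (see the kernel/sched.c hunk below). In sketch form, with interpretive comments added (these are the declarations from the diff, not extra kernel API):

    /* Old contract: only meaningful for current; no locking needed,
     * so the argument could be const. */
    unsigned long long current_sched_time(const struct task_struct *current_task);

    /* New contract: valid for any task; locking the runqueue mutates
     * shared state, so const no longer applies. */
    unsigned long long task_sched_runtime(struct task_struct *task);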
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 1de710e18373..b53c8fcd9d82 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -161,7 +161,7 @@ static inline cputime_t virt_ticks(struct task_struct *p)
 }
 static inline unsigned long long sched_ns(struct task_struct *p)
 {
-	return (p == current) ? current_sched_time(p) : p->sched_time;
+	return task_sched_runtime(p);
 }
 
 int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
@@ -246,10 +246,10 @@ static int cpu_clock_sample_group_locked(unsigned int clock_idx,
 		} while (t != p);
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = p->signal->sched_time;
+		cpu->sched = p->signal->sum_sched_runtime;
 		/* Add in each other live thread. */
 		while ((t = next_thread(t)) != p) {
-			cpu->sched += t->sched_time;
+			cpu->sched += t->se.sum_exec_runtime;
 		}
 		cpu->sched += sched_ns(p);
 		break;
@@ -422,7 +422,7 @@ int posix_cpu_timer_del(struct k_itimer *timer)
  */
 static void cleanup_timers(struct list_head *head,
 			   cputime_t utime, cputime_t stime,
-			   unsigned long long sched_time)
+			   unsigned long long sum_exec_runtime)
 {
 	struct cpu_timer_list *timer, *next;
 	cputime_t ptime = cputime_add(utime, stime);
@@ -451,10 +451,10 @@ static void cleanup_timers(struct list_head *head,
 	++head;
 	list_for_each_entry_safe(timer, next, head, entry) {
 		list_del_init(&timer->entry);
-		if (timer->expires.sched < sched_time) {
+		if (timer->expires.sched < sum_exec_runtime) {
 			timer->expires.sched = 0;
 		} else {
-			timer->expires.sched -= sum_exec_runtime;
 		}
 	}
 }
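Only the parameter name changes in cleanup_timers(), but the logic it feeds is easy to state: each armed CPUCLOCK_SCHED expiry is an absolute position on a clock that is about to stop existing, so it is rebased to "time remaining", saturating at zero. A standalone model of one rebase:

    #include <stdio.h>

    /* Rebase one absolute expiry against time already consumed,
     * saturating at 0 exactly as the kernel branch does. */
    static unsigned long long rebase(unsigned long long expires,
                                     unsigned long long sum_exec_runtime)
    {
            if (expires < sum_exec_runtime)
                    return 0;                   /* already expired */
            return expires - sum_exec_runtime;
    }

    int main(void)
    {
            printf("%llu\n", rebase(9000, 4000));   /* 5000 ns left */
            printf("%llu\n", rebase(3000, 4000));   /* 0: fires immediately */
            return 0;
    }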
@@ -467,7 +467,7 @@ static void cleanup_timers(struct list_head *head,
 void posix_cpu_timers_exit(struct task_struct *tsk)
 {
 	cleanup_timers(tsk->cpu_timers,
-		       tsk->utime, tsk->stime, tsk->sched_time);
+		       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
 
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
@@ -475,7 +475,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
 	cleanup_timers(tsk->signal->cpu_timers,
 		       cputime_add(tsk->utime, tsk->signal->utime),
 		       cputime_add(tsk->stime, tsk->signal->stime),
-		       tsk->sched_time + tsk->signal->sched_time);
+		       tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime);
 }
 
 
@@ -536,7 +536,7 @@ static void process_timer_rebalance(struct task_struct *p,
 	nsleft = max_t(unsigned long long, nsleft, 1);
 	do {
 		if (likely(!(t->flags & PF_EXITING))) {
-			ns = t->sched_time + nsleft;
+			ns = t->se.sum_exec_runtime + nsleft;
 			if (t->it_sched_expires == 0 ||
 			    t->it_sched_expires > ns) {
 				t->it_sched_expires = ns;
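process_timer_rebalance() turns a process-wide expiry into per-thread triggers: each live thread is armed at its own banked runtime plus an equal share of what is left (nsleft, divided and clamped to at least 1 ns before this loop), keeping any tighter per-thread trigger that is already set. A toy version, with a hypothetical rebalance() helper over an array instead of the kernel's thread list:

    #include <stdio.h>

    struct task {
            unsigned long long sum_exec_runtime;
            unsigned long long it_sched_expires;    /* 0 = disarmed */
    };

    /* Arm each live thread at its runtime plus its share, keeping any
     * earlier trigger that is already tighter. */
    static void rebalance(struct task *t, int n, unsigned long long nsleft)
    {
            int i;

            for (i = 0; i < n; i++) {
                    unsigned long long ns = t[i].sum_exec_runtime + nsleft;

                    if (t[i].it_sched_expires == 0 || t[i].it_sched_expires > ns)
                            t[i].it_sched_expires = ns;
            }
    }

    int main(void)
    {
            struct task t[2] = { {1000, 0}, {4000, 4500} };

            rebalance(t, 2, 2000);
            /* prints "3000 4500": the second thread's tighter trigger wins */
            printf("%llu %llu\n", t[0].it_sched_expires, t[1].it_sched_expires);
            return 0;
    }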
@@ -1004,7 +1004,7 @@ static void check_thread_timers(struct task_struct *tsk,
 		struct cpu_timer_list *t = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || tsk->sched_time < t->expires.sched) {
+		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
 			tsk->it_sched_expires = t->expires.sched;
 			break;
 		}
@@ -1024,7 +1024,7 @@ static void check_process_timers(struct task_struct *tsk,
 	int maxfire;
 	struct signal_struct *const sig = tsk->signal;
 	cputime_t utime, stime, ptime, virt_expires, prof_expires;
-	unsigned long long sched_time, sched_expires;
+	unsigned long long sum_sched_runtime, sched_expires;
 	struct task_struct *t;
 	struct list_head *timers = sig->cpu_timers;
 
@@ -1044,12 +1044,12 @@ static void check_process_timers(struct task_struct *tsk,
 	 */
 	utime = sig->utime;
 	stime = sig->stime;
-	sched_time = sig->sched_time;
+	sum_sched_runtime = sig->sum_sched_runtime;
 	t = tsk;
 	do {
 		utime = cputime_add(utime, t->utime);
 		stime = cputime_add(stime, t->stime);
-		sched_time += t->sched_time;
+		sum_sched_runtime += t->se.sum_exec_runtime;
 		t = next_thread(t);
 	} while (t != tsk);
 	ptime = cputime_add(utime, stime);
@@ -1090,7 +1090,7 @@ static void check_process_timers(struct task_struct *tsk,
 		struct cpu_timer_list *t = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || sched_time < t->expires.sched) {
+		if (!--maxfire || sum_sched_runtime < t->expires.sched) {
 			sched_expires = t->expires.sched;
 			break;
 		}
@@ -1182,7 +1182,7 @@ static void check_process_timers(struct task_struct *tsk,
 		virt_left = cputime_sub(virt_expires, utime);
 		virt_left = cputime_div_non_zero(virt_left, nthreads);
 		if (sched_expires) {
-			sched_left = sched_expires - sched_time;
+			sched_left = sched_expires - sum_sched_runtime;
 			do_div(sched_left, nthreads);
 			sched_left = max_t(unsigned long long, sched_left, 1);
 		} else {
@@ -1208,7 +1208,7 @@ static void check_process_timers(struct task_struct *tsk,
 				t->it_virt_expires = ticks;
 			}
 
-			sched = t->sched_time + sched_left;
+			sched = t->se.sum_exec_runtime + sched_left;
 			if (sched_expires && (t->it_sched_expires == 0 ||
 					      t->it_sched_expires > sched)) {
 				t->it_sched_expires = sched;
@@ -1300,7 +1300,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
 	if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
 	    (tsk->it_sched_expires == 0 ||
-	     tsk->sched_time < tsk->it_sched_expires))
+	     tsk->se.sum_exec_runtime < tsk->it_sched_expires))
 		return;
 
 #undef UNEXPIRED
diff --git a/kernel/sched.c b/kernel/sched.c
index 29eb227e33f7..0333abdda85e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3156,28 +3156,23 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 
 /*
- * This is called on clock ticks and on context switches.
- * Bank in p->sched_time the ns elapsed since the last tick or switch.
+ * Return p->sum_exec_runtime plus any more ns on the sched_clock
+ * that have not yet been banked in case the task is currently running.
  */
-static inline void
-update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now)
+unsigned long long task_sched_runtime(struct task_struct *p)
 {
-	p->sched_time += now - p->last_ran;
-	p->last_ran = rq->most_recent_timestamp = now;
-}
-
-/*
- * Return current->sched_time plus any more ns on the sched_clock
- * that have not yet been banked.
- */
-unsigned long long current_sched_time(const struct task_struct *p)
-{
-	unsigned long long ns;
 	unsigned long flags;
+	u64 ns, delta_exec;
+	struct rq *rq;
 
-	local_irq_save(flags);
-	ns = p->sched_time + sched_clock() - p->last_ran;
-	local_irq_restore(flags);
+	rq = task_rq_lock(p, &flags);
+	ns = p->se.sum_exec_runtime;
+	if (rq->curr == p) {
+		delta_exec = rq_clock(rq) - p->se.exec_start;
+		if ((s64)delta_exec > 0)
+			ns += delta_exec;
+	}
+	task_rq_unlock(rq, &flags);
 
 	return ns;
 }
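Two details of the rewritten function are worth noting. It takes task_rq_lock() instead of merely disabling local interrupts, which is what makes it safe to call on tasks other than current; and the (s64) cast presumably exists to discard a sample where the runqueue clock trails p->se.exec_start, where the unsigned subtraction would otherwise wrap to a huge bogus value. A userspace illustration of that guard:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long clock = 100, exec_start = 160;
            unsigned long long delta_exec = clock - exec_start;     /* wraps */

            printf("raw unsigned delta: %llu\n", delta_exec);
            printf("signed guard says:  %s\n",
                   (long long)delta_exec > 0 ? "add it" : "skip it");
            return 0;
    }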
@@ -3360,14 +3355,11 @@ out_unlock:
  */
 void scheduler_tick(void)
 {
-	unsigned long long now = sched_clock();
 	struct task_struct *p = current;
 	int cpu = smp_processor_id();
 	int idle_at_tick = idle_cpu(cpu);
 	struct rq *rq = cpu_rq(cpu);
 
-	update_cpu_clock(p, rq, now);
-
 	if (!idle_at_tick)
 		task_running_tick(rq, p);
 #ifdef CONFIG_SMP
@@ -3550,8 +3542,6 @@ switch_tasks:
 	clear_tsk_need_resched(prev);
 	rcu_qsctr_inc(task_cpu(prev));
 
-	update_cpu_clock(prev, rq, now);
-
 	prev->sleep_avg -= run_time;
 	if ((long)prev->sleep_avg <= 0)
 		prev->sleep_avg = 0;
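With both update_cpu_clock() call sites gone, the tick and context-switch paths no longer do any posix-cpu-timers bookkeeping of their own. The clocks this code implements are the ones user space reaches through clock_gettime(); a quick way to exercise the converted path on any POSIX system (values will vary; link with -lrt on older glibc):

    #include <stdio.h>
    #include <time.h>

    /* Burn some CPU, then read the per-thread and per-process CPU
     * clocks that the posix-cpu-timers code above backs. */
    int main(void)
    {
            struct timespec ts;
            volatile unsigned long i, x = 0;

            for (i = 0; i < 50000000UL; i++)
                    x += i;

            clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
            printf("thread  cpu: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);

            clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
            printf("process cpu: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }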