author     Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 10:43:28 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 10:43:28 -0400
commit     16fa94b532b1958f508e07eca1a9256351241fbc (patch)
tree       90012a7b7fe2b8cf96f6f5ec12490e0c5e152291 /kernel/sched/cputime.c
parent     e0972916e8fe943f342b0dd1c9d43dbf5bc261c2 (diff)
parent     25f55d9d01ad7a7ad248fd5af1d22675ffd202c5 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
 "The main changes in this development cycle were:

   - full dynticks preparatory work by Frederic Weisbecker

   - factor out the cpu time accounting code better, by Li Zefan

   - multi-CPU load balancer cleanups and improvements by Joonsoo Kim

   - various smaller fixes and cleanups"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (45 commits)
  sched: Fix init NOHZ_IDLE flag
  sched: Prevent to re-select dst-cpu in load_balance()
  sched: Rename load_balance_tmpmask to load_balance_mask
  sched: Move up affinity check to mitigate useless redoing overhead
  sched: Don't consider other cpus in our group in case of NEWLY_IDLE
  sched: Explicitly cpu_idle_type checking in rebalance_domains()
  sched: Change position of resched_cpu() in load_balance()
  sched: Fix wrong rq's runnable_avg update with rt tasks
  sched: Document task_struct::personality field
  sched/cpuacct/UML: Fix header file dependency bug on the UML build
  cgroup: Kill subsys.active flag
  sched/cpuacct: No need to check subsys active state
  sched/cpuacct: Initialize cpuacct subsystem earlier
  sched/cpuacct: Initialize root cpuacct earlier
  sched/cpuacct: Allocate per_cpu cpuusage for root cpuacct statically
  sched/cpuacct: Clean up cpuacct.h
  sched/cpuacct: Remove redundant NULL checks in cpuacct_acount_field()
  sched/cpuacct: Remove redundant NULL checks in cpuacct_charge()
  sched/cpuacct: Add cpuacct_acount_field()
  sched/cpuacct: Add cpuacct_init()
  ...
Diffstat (limited to 'kernel/sched/cputime.c')
 -rw-r--r--  kernel/sched/cputime.c | 214
 1 file changed, 113 insertions(+), 101 deletions(-)
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index e93cca92f38b..ea32f02bf2c3 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -115,10 +115,6 @@ static int irqtime_account_si_update(void)
 static inline void task_group_account_field(struct task_struct *p, int index,
 					    u64 tmp)
 {
-#ifdef CONFIG_CGROUP_CPUACCT
-	struct kernel_cpustat *kcpustat;
-	struct cpuacct *ca;
-#endif
 	/*
 	 * Since all updates are sure to touch the root cgroup, we
 	 * get ourselves ahead and touch it first. If the root cgroup
@@ -127,19 +123,7 @@ static inline void task_group_account_field(struct task_struct *p, int index,
 	 */
 	__get_cpu_var(kernel_cpustat).cpustat[index] += tmp;
 
-#ifdef CONFIG_CGROUP_CPUACCT
-	if (unlikely(!cpuacct_subsys.active))
-		return;
-
-	rcu_read_lock();
-	ca = task_ca(p);
-	while (ca && (ca != &root_cpuacct)) {
-		kcpustat = this_cpu_ptr(ca->cpustat);
-		kcpustat->cpustat[index] += tmp;
-		ca = parent_ca(ca);
-	}
-	rcu_read_unlock();
-#endif
+	cpuacct_account_field(p, index, tmp);
 }
 
 /*
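For reference, the open-coded hierarchy walk removed here becomes a single call to cpuacct_account_field(), which this series adds to kernel/sched/cpuacct.c. A sketch of the factored-out helper, reconstructed from the removed block above rather than copied from cpuacct.c (per the shortlog, "cgroup: Kill subsys.active flag" and "sched/cpuacct: Remove redundant NULL checks in cpuacct_acount_field()" are why neither the active test nor the NULL check on ca survives):

/* Sketch, reconstructed from the block removed above; not a verbatim
 * copy of kernel/sched/cpuacct.c. */
void cpuacct_account_field(struct task_struct *p, int index, u64 val)
{
	struct kernel_cpustat *kcpustat;
	struct cpuacct *ca;

	rcu_read_lock();
	ca = task_ca(p);
	/* Charge every cpuacct group from the task's group up to, but not
	 * including, the root: the root was already updated through the
	 * per-cpu kernel_cpustat fast path in the caller. */
	while (ca != &root_cpuacct) {
		kcpustat = this_cpu_ptr(ca->cpustat);
		kcpustat->cpustat[index] += val;
		ca = parent_ca(ca);
	}
	rcu_read_unlock();
}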
@@ -388,82 +372,10 @@ static inline void irqtime_account_process_tick(struct task_struct *p, int user_
 					struct rq *rq) {}
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-/*
- * Account a single tick of cpu time.
- * @p: the process that the cpu time gets accounted to
- * @user_tick: indicates if the tick is a user or a system tick
- */
-void account_process_tick(struct task_struct *p, int user_tick)
-{
-	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
-	struct rq *rq = this_rq();
-
-	if (vtime_accounting_enabled())
-		return;
-
-	if (sched_clock_irqtime) {
-		irqtime_account_process_tick(p, user_tick, rq);
-		return;
-	}
-
-	if (steal_account_process_tick())
-		return;
-
-	if (user_tick)
-		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
-	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
-		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
-				    one_jiffy_scaled);
-	else
-		account_idle_time(cputime_one_jiffy);
-}
-
-/*
- * Account multiple ticks of steal time.
- * @p: the process from which the cpu time has been stolen
- * @ticks: number of stolen ticks
- */
-void account_steal_ticks(unsigned long ticks)
-{
-	account_steal_time(jiffies_to_cputime(ticks));
-}
-
-/*
- * Account multiple ticks of idle time.
- * @ticks: number of stolen ticks
- */
-void account_idle_ticks(unsigned long ticks)
-{
-
-	if (sched_clock_irqtime) {
-		irqtime_account_idle_ticks(ticks);
-		return;
-	}
-
-	account_idle_time(jiffies_to_cputime(ticks));
-}
-#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
 /*
  * Use precise platform statistics if available:
  */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
-{
-	*ut = p->utime;
-	*st = p->stime;
-}
-
-void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
-{
-	struct task_cputime cputime;
-
-	thread_group_cputime(p, &cputime);
-
-	*ut = cputime.utime;
-	*st = cputime.stime;
-}
 
 #ifndef __ARCH_HAS_VTIME_TASK_SWITCH
 void vtime_task_switch(struct task_struct *prev)
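This hunk only removes code; the next one shows where it lands. Taken together, the two reshuffle the file's config guards roughly like this (a summary of the resulting layout, not kernel source):

/*
 * #ifdef CONFIG_VIRT_CPU_ACCOUNTING          -- precise vtime hooks
 *	vtime_task_switch(), vtime_account_irq_enter(), ...
 * #endif
 *
 * #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE   -- utime/stime already precise
 *	task_cputime_adjusted(), thread_group_cputime_adjusted()
 *	(plain copies, no scaling needed)
 * #else                                      -- tick-based sampling
 *	account_process_tick(), account_steal_ticks(), account_idle_ticks(),
 *	scale_stime(), cputime_adjust(), ...
 * #endif
 */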
@@ -518,21 +430,111 @@ void vtime_account_irq_enter(struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
 #endif /* __ARCH_HAS_VTIME_ACCOUNT */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+	*ut = p->utime;
+	*st = p->stime;
+}
 
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+	struct task_cputime cputime;
 
-static cputime_t scale_stime(cputime_t stime, cputime_t rtime, cputime_t total)
+	thread_group_cputime(p, &cputime);
+
+	*ut = cputime.utime;
+	*st = cputime.stime;
+}
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+/*
+ * Account a single tick of cpu time.
+ * @p: the process that the cpu time gets accounted to
+ * @user_tick: indicates if the tick is a user or a system tick
+ */
+void account_process_tick(struct task_struct *p, int user_tick)
 {
-	u64 temp = (__force u64) rtime;
+	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+	struct rq *rq = this_rq();
 
-	temp *= (__force u64) stime;
+	if (vtime_accounting_enabled())
+		return;
+
+	if (sched_clock_irqtime) {
+		irqtime_account_process_tick(p, user_tick, rq);
+		return;
+	}
+
+	if (steal_account_process_tick())
+		return;
 
-	if (sizeof(cputime_t) == 4)
-		temp = div_u64(temp, (__force u32) total);
+	if (user_tick)
+		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
+		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
+				    one_jiffy_scaled);
 	else
-		temp = div64_u64(temp, (__force u64) total);
+		account_idle_time(cputime_one_jiffy);
+}
 
-	return (__force cputime_t) temp;
+/*
+ * Account multiple ticks of steal time.
+ * @p: the process from which the cpu time has been stolen
+ * @ticks: number of stolen ticks
+ */
+void account_steal_ticks(unsigned long ticks)
+{
+	account_steal_time(jiffies_to_cputime(ticks));
+}
+
+/*
+ * Account multiple ticks of idle time.
+ * @ticks: number of stolen ticks
+ */
+void account_idle_ticks(unsigned long ticks)
+{
+
+	if (sched_clock_irqtime) {
+		irqtime_account_idle_ticks(ticks);
+		return;
+	}
+
+	account_idle_time(jiffies_to_cputime(ticks));
+}
+
+/*
+ * Perform (stime * rtime) / total with reduced chances
+ * of multiplication overflows by using smaller factors
+ * like quotient and remainders of divisions between
+ * rtime and total.
+ */
+static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
+{
+	u64 rem, res, scaled;
+
+	if (rtime >= total) {
+		/*
+		 * Scale up to rtime / total then add
+		 * the remainder scaled to stime / total.
+		 */
+		res = div64_u64_rem(rtime, total, &rem);
+		scaled = stime * res;
+		scaled += div64_u64(stime * rem, total);
+	} else {
+		/*
+		 * Same in reverse: scale down to total / rtime
+		 * then substract that result scaled to
+		 * to the remaining part.
+		 */
+		res = div64_u64_rem(total, rtime, &rem);
+		scaled = div64_u64(stime, res);
+		scaled -= div64_u64(scaled * rem, total);
+	}
+
+	return (__force cputime_t) scaled;
 }
 
 /*
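The new scale_stime() computes stime * rtime / total without a 128-bit multiply by splitting one operand into quotient and remainder: for rtime >= total it relies, up to integer rounding, on stime * rtime / total = stime * (rtime / total) + stime * (rtime % total) / total. A minimal user-space sketch of the same trick (the function name, the demo values, and plain / and % in place of div64_u64_rem()/div64_u64() are ours):

#include <stdint.h>
#include <stdio.h>

/* Restatement of the quotient/remainder trick in scale_stime() above;
 * an illustration, not kernel code. */
static uint64_t scale_stime_demo(uint64_t stime, uint64_t rtime, uint64_t total)
{
	uint64_t res, rem, scaled;

	if (rtime >= total) {
		/* scale up: stime * (rtime/total), plus the remainder part */
		res = rtime / total;
		rem = rtime % total;
		scaled = stime * res + stime * rem / total;
	} else {
		/* scale down: stime / (total/rtime), minus the remainder part */
		res = total / rtime;
		rem = total % rtime;
		scaled = stime / res;
		scaled -= scaled * rem / total;
	}
	return scaled;
}

int main(void)
{
	/* 3s of sampled stime out of 10s of ticks, 20s of scheduler runtime:
	 * the sampled system share (30%) rescaled to 20s gives 6s. */
	printf("%llu\n", (unsigned long long)scale_stime_demo(3, 20, 10));
	return 0;
}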
@@ -545,6 +547,12 @@ static void cputime_adjust(struct task_cputime *curr,
 {
 	cputime_t rtime, stime, total;
 
+	if (vtime_accounting_enabled()) {
+		*ut = curr->utime;
+		*st = curr->stime;
+		return;
+	}
+
 	stime = curr->stime;
 	total = stime + curr->utime;
 
@@ -560,10 +568,14 @@ static void cputime_adjust(struct task_cputime *curr,
 	 */
 	rtime = nsecs_to_cputime(curr->sum_exec_runtime);
 
-	if (total)
-		stime = scale_stime(stime, rtime, total);
-	else
-		stime = rtime;
+	if (!rtime) {
+		stime = 0;
+	} else if (!total) {
+		stime = rtime;
+	} else {
+		stime = scale_stime((__force u64)stime,
+				    (__force u64)rtime, (__force u64)total);
+	}
 
 	/*
 	 * If the tick based count grows faster than the scheduler one,
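The branch ordering here matters: the rewritten scale_stime() divides by rtime on its scale-down path, so the rtime == 0 case has to be filtered out before anything else, and the old total == 0 special case keeps its behavior. Restated standalone, reusing scale_stime_demo() from the sketch above (the helper name is ours):

/* rtime: the scheduler's precise sum_exec_runtime;
 * stime/utime: tick-sampled system and user time. */
static uint64_t adjusted_stime(uint64_t stime, uint64_t utime, uint64_t rtime)
{
	uint64_t total = stime + utime;

	if (!rtime)
		return 0;	/* never ran per the scheduler clock */
	if (!total)
		return rtime;	/* no tick landed: attribute all runtime
				 * to stime, as the old code did */
	/* keep the sampled system/user ratio, rescaled to rtime */
	return scale_stime_demo(stime, rtime, total);
}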
@@ -597,7 +609,7 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
 	thread_group_cputime(p, &cputime);
 	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
 }
-#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 static unsigned long long vtime_delta(struct task_struct *tsk)