author    Chandra Seetharaman <sekharan@us.ibm.com>  2006-07-14 03:24:38 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-07-15 00:53:56 -0400
commit    52f17b6c2bd443e7806a161e9d10a983650db01d
tree      67f9a8964a3ac78091cefcd7baf8935175a0a003
parent    0ff922452df86f3e9a2c6f705c4588ec62d096a7
[PATCH] per-task-delay-accounting: cpu delay collection via schedstats
Make the task-related schedstats functions callable by delay accounting
even if schedstats collection isn't turned on. This removes the
dependency of delay accounting on schedstats.

Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Shailabh Nagar <nagar@watson.ibm.com>
Signed-off-by: Balbir Singh <balbir@in.ibm.com>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Peter Chubb <peterc@gelato.unsw.edu.au>
Cc: Erich Focht <efocht@ess.nec.de>
Cc: Levent Serinol <lserinol@gmail.com>
Cc: Jay Lan <jlan@engr.sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
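[Editor's note: the sched_info_on() predicate that gates the new code paths
below is not part of this diff; it is introduced in include/linux/sched.h by a
companion patch in this series. A rough sketch of its intended semantics --
the runtime delayacct_on toggle comes from the delay-accounting patches and
should be treated as an assumption here:

/*
 * Sketch of sched_info_on(): with schedstats compiled in, sched_info is
 * always collected; with only delay accounting compiled in, collection
 * follows the runtime delayacct_on switch; otherwise it is off entirely.
 */
static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;	/* runtime delay-accounting toggle */
	return delayacct_on;
#else
	return 0;
#endif
}
]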
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c | 71
1 file changed, 49 insertions(+), 22 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 9d42cbfc4f8b..b44b9a43b0fc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -502,9 +502,36 @@ struct file_operations proc_schedstat_operations = {
 	.release	= single_release,
 };
 
+/*
+ * Expects runqueue lock to be held for atomicity of update
+ */
+static inline void
+rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
+{
+	if (rq) {
+		rq->rq_sched_info.run_delay += delta_jiffies;
+		rq->rq_sched_info.pcnt++;
+	}
+}
+
+/*
+ * Expects runqueue lock to be held for atomicity of update
+ */
+static inline void
+rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
+{
+	if (rq)
+		rq->rq_sched_info.cpu_time += delta_jiffies;
+}
 # define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
 # define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
 #else /* !CONFIG_SCHEDSTATS */
+static inline void
+rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
+{}
+static inline void
+rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
+{}
 # define schedstat_inc(rq, field)	do { } while (0)
 # define schedstat_add(rq, field, amt)	do { } while (0)
 #endif
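[Editor's note: with CONFIG_SCHEDSTATS disabled but CONFIG_TASK_DELAY_ACCT
enabled, the empty stubs above let the callers below compile unchanged while
the per-runqueue bookkeeping vanishes; only the per-task sched_info fields
stay live. Illustratively, sched_info_depart() then effectively reduces to:

/*
 * Illustrative only: the effective body of sched_info_depart() once the
 * empty rq_sched_info_depart() stub is inlined away (schedstats off,
 * delay accounting on).
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;

	t->sched_info.cpu_time += delta_jiffies;	/* per-task only */
	/* per-rq aggregation compiled out */
}
]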
@@ -524,7 +551,7 @@ static inline struct rq *this_rq_lock(void)
 	return rq;
 }
 
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 /*
  * Called when a process is dequeued from the active array and given
  * the cpu.  We should note that with the exception of interactive
@@ -552,21 +579,16 @@ static inline void sched_info_dequeued(struct task_struct *t)
  */
 static void sched_info_arrive(struct task_struct *t)
 {
-	unsigned long now = jiffies, diff = 0;
-	struct rq *rq = task_rq(t);
+	unsigned long now = jiffies, delta_jiffies = 0;
 
 	if (t->sched_info.last_queued)
-		diff = now - t->sched_info.last_queued;
+		delta_jiffies = now - t->sched_info.last_queued;
 	sched_info_dequeued(t);
-	t->sched_info.run_delay += diff;
+	t->sched_info.run_delay += delta_jiffies;
 	t->sched_info.last_arrival = now;
 	t->sched_info.pcnt++;
 
-	if (!rq)
-		return;
-
-	rq->rq_sched_info.run_delay += diff;
-	rq->rq_sched_info.pcnt++;
+	rq_sched_info_arrive(task_rq(t), delta_jiffies);
 }
 
 /*
@@ -586,8 +608,9 @@ static void sched_info_arrive(struct task_struct *t)
  */
 static inline void sched_info_queued(struct task_struct *t)
 {
-	if (!t->sched_info.last_queued)
-		t->sched_info.last_queued = jiffies;
+	if (unlikely(sched_info_on()))
+		if (!t->sched_info.last_queued)
+			t->sched_info.last_queued = jiffies;
 }
 
 /*
@@ -596,13 +619,10 @@ static inline void sched_info_queued(struct task_struct *t)
  */
 static inline void sched_info_depart(struct task_struct *t)
 {
-	struct rq *rq = task_rq(t);
-	unsigned long diff = jiffies - t->sched_info.last_arrival;
-
-	t->sched_info.cpu_time += diff;
+	unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;
 
-	if (rq)
-		rq->rq_sched_info.cpu_time += diff;
+	t->sched_info.cpu_time += delta_jiffies;
+	rq_sched_info_depart(task_rq(t), delta_jiffies);
 }
 
 /*
@@ -611,7 +631,7 @@ static inline void sched_info_depart(struct task_struct *t)
  * the idle task.)  We are only called when prev != next.
  */
 static inline void
-sched_info_switch(struct task_struct *prev, struct task_struct *next)
+__sched_info_switch(struct task_struct *prev, struct task_struct *next)
 {
 	struct rq *rq = task_rq(prev);
 
@@ -626,10 +646,16 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 	if (next != rq->idle)
 		sched_info_arrive(next);
 }
+static inline void
+sched_info_switch(struct task_struct *prev, struct task_struct *next)
+{
+	if (unlikely(sched_info_on()))
+		__sched_info_switch(prev, next);
+}
 #else
 #define sched_info_queued(t)		do { } while (0)
 #define sched_info_switch(t, next)	do { } while (0)
-#endif /* CONFIG_SCHEDSTATS */
+#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 
 /*
  * Adding/removing a task to/from a priority array:
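[Editor's note: the design choice here is that call sites stay untouched --
the wrapper centralizes the runtime sched_info_on() check, so every caller
pays at most one predicted branch. An illustrative (not verbatim) caller,
modeled loosely on the context-switch path in schedule():

/*
 * Illustrative only: a context-switch path invoking the new wrapper;
 * not a verbatim excerpt from kernel/sched.c.
 */
static void switch_example(struct rq *rq, struct task_struct *prev,
			   struct task_struct *next)
{
	if (likely(prev != next)) {
		sched_info_switch(prev, next);	/* no-op unless sched_info_on() */
		rq->nr_switches++;
		rq->curr = next;
		/* ... the actual context switch would follow ... */
	}
}
]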
@@ -1531,8 +1557,9 @@ void fastcall sched_fork(struct task_struct *p, int clone_flags)
 
 	INIT_LIST_HEAD(&p->run_list);
 	p->array = NULL;
-#ifdef CONFIG_SCHEDSTATS
-	memset(&p->sched_info, 0, sizeof(p->sched_info));
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+	if (unlikely(sched_info_on()))
+		memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	p->oncpu = 0;
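[Editor's note: the point of keeping these per-task counters alive without
schedstats is that delay accounting can export them to userspace; the real
consumer is __delayacct_add_tsk() in kernel/delayacct.c from the same series.
A minimal sketch of such a consumer -- the conversions and taskstats field
names follow that code but should be treated as assumptions here:

/*
 * Sketch of a delay-accounting consumer of the per-task sched_info
 * fields: run_delay is time spent runnable waiting for a cpu, cpu_time
 * is time actually spent running, pcnt counts dispatches onto a cpu.
 * Values are converted from jiffies to nanoseconds for taskstats.
 */
static void delayacct_cpu_stats(struct taskstats *d, struct task_struct *tsk)
{
	d->cpu_count = tsk->sched_info.pcnt;
	d->cpu_run_real_total =
		(u64)jiffies_to_usecs(tsk->sched_info.cpu_time) * NSEC_PER_USEC;
	d->cpu_delay_total =
		(u64)jiffies_to_usecs(tsk->sched_info.run_delay) * NSEC_PER_USEC;
}
]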