author	Chandra Seetharaman <sekharan@us.ibm.com>	2006-07-14 03:24:38 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-15 00:53:56 -0400
commit	52f17b6c2bd443e7806a161e9d10a983650db01d (patch)
tree	67f9a8964a3ac78091cefcd7baf8935175a0a003
parent	0ff922452df86f3e9a2c6f705c4588ec62d096a7 (diff)
[PATCH] per-task-delay-accounting: cpu delay collection via schedstats
Make the task-related schedstats functions callable by delay accounting
even if schedstats collection isn't turned on.  This removes the
dependency of delay accounting on schedstats.

Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Shailabh Nagar <nagar@watson.ibm.com>
Signed-off-by: Balbir Singh <balbir@in.ibm.com>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Peter Chubb <peterc@gelato.unsw.edu.au>
Cc: Erich Focht <efocht@ess.nec.de>
Cc: Levent Serinol <lserinol@gmail.com>
Cc: Jay Lan <jlan@engr.sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	include/linux/sched.h	20
-rw-r--r--	kernel/sched.c	71
2 files changed, 66 insertions(+), 25 deletions(-)
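The core of the patch is the new sched_info_on() helper: statistics are always collected when CONFIG_SCHEDSTATS is built in, and otherwise only when delay accounting has been enabled at runtime via delayacct_on. For illustration, here is a minimal user-space sketch of that gating pattern; the CONFIG_* toggle, the delayacct_on initializer, and the main() harness are mock scaffolding, not kernel code:

#include <stdio.h>

/* Toggle these to model CONFIG_SCHEDSTATS / CONFIG_TASK_DELAY_ACCT. */
#define CONFIG_TASK_DELAY_ACCT 1
/* #define CONFIG_SCHEDSTATS 1 */

#ifdef CONFIG_TASK_DELAY_ACCT
int delayacct_on = 1;		/* runtime switch (e.g. a boot parameter) */
#endif

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;		/* schedstats built in: always collect */
#elif defined(CONFIG_TASK_DELAY_ACCT)
	return delayacct_on;	/* collect only if delay accounting is on */
#else
	return 0;		/* neither facility configured: never collect */
#endif
}

int main(void)
{
	/* Callers gate the hot path exactly like sched_info_switch() does. */
	if (sched_info_on())
		printf("collecting sched_info\n");
	else
		printf("skipping sched_info collection\n");
	return 0;
}

Toggling the two defines reproduces the three compile-time cases the helper distinguishes.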
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2f43f1fb7de7..f751062d89a2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -537,7 +537,7 @@ extern struct user_struct root_user;
 struct backing_dev_info;
 struct reclaim_state;
 
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 struct sched_info {
 	/* cumulative counters */
 	unsigned long	cpu_time,	/* time spent on the cpu */
@@ -548,9 +548,11 @@ struct sched_info {
 	unsigned long	last_arrival,	/* when we last ran on a cpu */
 			last_queued;	/* when we were last queued to run */
 };
+#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
+#ifdef CONFIG_SCHEDSTATS
 extern struct file_operations proc_schedstat_operations;
-#endif
+#endif /* CONFIG_SCHEDSTATS */
 
 #ifdef CONFIG_TASK_DELAY_ACCT
 struct task_delay_info {
@@ -580,7 +582,19 @@ struct task_delay_info {
 	u32 swapin_count;	/* total count of the number of swapin block */
 				/* io operations performed */
 };
+#endif	/* CONFIG_TASK_DELAY_ACCT */
+
+static inline int sched_info_on(void)
+{
+#ifdef CONFIG_SCHEDSTATS
+	return 1;
+#elif defined(CONFIG_TASK_DELAY_ACCT)
+	extern int delayacct_on;
+	return delayacct_on;
+#else
+	return 0;
 #endif
+}
 
 enum idle_type
 {
@@ -777,7 +791,7 @@ struct task_struct {
 	cpumask_t cpus_allowed;
 	unsigned int time_slice, first_time_slice;
 
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
 #endif
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 9d42cbfc4f8b..b44b9a43b0fc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -502,9 +502,36 @@ struct file_operations proc_schedstat_operations = {
 	.release = single_release,
 };
 
+/*
+ * Expects runqueue lock to be held for atomicity of update
+ */
+static inline void
+rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
+{
+	if (rq) {
+		rq->rq_sched_info.run_delay += delta_jiffies;
+		rq->rq_sched_info.pcnt++;
+	}
+}
+
+/*
+ * Expects runqueue lock to be held for atomicity of update
+ */
+static inline void
+rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
+{
+	if (rq)
+		rq->rq_sched_info.cpu_time += delta_jiffies;
+}
 # define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
 # define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
 #else /* !CONFIG_SCHEDSTATS */
+static inline void
+rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
+{}
+static inline void
+rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
+{}
 # define schedstat_inc(rq, field)	do { } while (0)
 # define schedstat_add(rq, field, amt)	do { } while (0)
 #endif
@@ -524,7 +551,7 @@ static inline struct rq *this_rq_lock(void)
 	return rq;
 }
 
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 /*
  * Called when a process is dequeued from the active array and given
  * the cpu.  We should note that with the exception of interactive
@@ -552,21 +579,16 @@ static inline void sched_info_dequeued(struct task_struct *t)
  */
 static void sched_info_arrive(struct task_struct *t)
 {
-	unsigned long now = jiffies, diff = 0;
-	struct rq *rq = task_rq(t);
+	unsigned long now = jiffies, delta_jiffies = 0;
 
 	if (t->sched_info.last_queued)
-		diff = now - t->sched_info.last_queued;
+		delta_jiffies = now - t->sched_info.last_queued;
 	sched_info_dequeued(t);
-	t->sched_info.run_delay += diff;
+	t->sched_info.run_delay += delta_jiffies;
 	t->sched_info.last_arrival = now;
 	t->sched_info.pcnt++;
 
-	if (!rq)
-		return;
-
-	rq->rq_sched_info.run_delay += diff;
-	rq->rq_sched_info.pcnt++;
+	rq_sched_info_arrive(task_rq(t), delta_jiffies);
 }
 
 /*
@@ -586,8 +608,9 @@ static void sched_info_arrive(struct task_struct *t)
  */
 static inline void sched_info_queued(struct task_struct *t)
 {
-	if (!t->sched_info.last_queued)
-		t->sched_info.last_queued = jiffies;
+	if (unlikely(sched_info_on()))
+		if (!t->sched_info.last_queued)
+			t->sched_info.last_queued = jiffies;
 }
 
 /*
@@ -596,13 +619,10 @@ static inline void sched_info_queued(struct task_struct *t)
  */
 static inline void sched_info_depart(struct task_struct *t)
 {
-	struct rq *rq = task_rq(t);
-	unsigned long diff = jiffies - t->sched_info.last_arrival;
-
-	t->sched_info.cpu_time += diff;
+	unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;
 
-	if (rq)
-		rq->rq_sched_info.cpu_time += diff;
+	t->sched_info.cpu_time += delta_jiffies;
+	rq_sched_info_depart(task_rq(t), delta_jiffies);
 }
 
 /*
@@ -611,7 +631,7 @@ static inline void sched_info_depart(struct task_struct *t)
  * the idle task.)  We are only called when prev != next.
  */
 static inline void
-sched_info_switch(struct task_struct *prev, struct task_struct *next)
+__sched_info_switch(struct task_struct *prev, struct task_struct *next)
 {
 	struct rq *rq = task_rq(prev);
 
@@ -626,10 +646,16 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 	if (next != rq->idle)
 		sched_info_arrive(next);
 }
+static inline void
+sched_info_switch(struct task_struct *prev, struct task_struct *next)
+{
+	if (unlikely(sched_info_on()))
+		__sched_info_switch(prev, next);
+}
 #else
 #define sched_info_queued(t)		do { } while (0)
 #define sched_info_switch(t, next)	do { } while (0)
-#endif /* CONFIG_SCHEDSTATS */
+#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 
 /*
  * Adding/removing a task to/from a priority array:
@@ -1531,8 +1557,9 @@ void fastcall sched_fork(struct task_struct *p, int clone_flags)
 
 	INIT_LIST_HEAD(&p->run_list);
 	p->array = NULL;
-#ifdef CONFIG_SCHEDSTATS
-	memset(&p->sched_info, 0, sizeof(p->sched_info));
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+	if (unlikely(sched_info_on()))
+		memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	p->oncpu = 0;
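The timestamp bookkeeping performed by sched_info_queued(), sched_info_arrive(), and sched_info_depart() is unchanged in substance by this patch: run_delay accumulates time spent waiting on a runqueue, cpu_time accumulates time spent actually executing. Below is a small user-space mock of that flow, assuming a hand-advanced jiffies counter and a pared-down struct; all names are illustrative scaffolding rather than the kernel's code:

#include <stdio.h>

static unsigned long jiffies;	/* mock clock, advanced by hand below */

struct sched_info {
	unsigned long run_delay, cpu_time;
	unsigned long last_arrival, last_queued;
};

static void info_queued(struct sched_info *si)
{
	if (!si->last_queued)
		si->last_queued = jiffies;	/* remember when we became runnable */
}

static void info_arrive(struct sched_info *si)
{
	unsigned long now = jiffies, delta_jiffies = 0;

	if (si->last_queued)
		delta_jiffies = now - si->last_queued;
	si->last_queued = 0;			/* mirrors sched_info_dequeued() */
	si->run_delay += delta_jiffies;		/* time spent waiting to run */
	si->last_arrival = now;
}

static void info_depart(struct sched_info *si)
{
	si->cpu_time += jiffies - si->last_arrival;	/* time spent on the cpu */
}

int main(void)
{
	struct sched_info si = { 0 };

	jiffies = 100; info_queued(&si);	/* task becomes runnable */
	jiffies = 130; info_arrive(&si);	/* picked to run: waited 30 */
	jiffies = 180; info_depart(&si);	/* switched out: ran for 50 */

	printf("run_delay=%lu cpu_time=%lu\n", si.run_delay, si.cpu_time);
	return 0;
}

Run as written this prints run_delay=30 cpu_time=50: the task waited 30 ticks between being queued and arriving on a cpu, then ran for 50 ticks before departing. With this patch, that collection now also feeds delay accounting whenever sched_info_on() reports it is enabled.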