author		Nikhil Rao <ncrao@google.com>	2011-07-21 12:43:40 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-08-14 06:03:49 -0400
commit		e8da1b18b32064c43881bceef0f051c2110c9ab9 (patch)
tree		8d6e9e5713c1da4cee3ceba1da67df6cab22ab9f /kernel/sched_fair.c
parent		d3d9dc3302368269acf94b7381663b93000fe2fe (diff)
sched: Add exports tracking cfs bandwidth control statistics
This change introduces statistics exports for the cpu sub-system; they are added through a stat file similar to that exported by other subsystems.

The following exports are included:

nr_periods:     number of periods in which execution occurred
nr_throttled:   the number of periods above in which execution was throttled
throttled_time: cumulative wall-time that any cpus have been throttled for this group

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Nikhil Rao <ncrao@google.com>
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184758.198901931@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
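Once a group's bandwidth limit is configured, these counters can be read from the group's stat file. Below is a minimal userspace sketch, assuming the cpu controller is mounted at /sys/fs/cgroup/cpu and a hypothetical group named "mygroup" exists; both paths are illustrative and not part of this patch.

/*
 * Minimal sketch: read the cfs bandwidth statistics exported via cpu.stat.
 * The mount point and group name below are assumptions for illustration.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/cpu/mygroup/cpu.stat", "r");
	char key[64];
	unsigned long long val;

	if (!f) {
		perror("fopen");
		return 1;
	}

	/* Each line is "<name> <value>": nr_periods, nr_throttled, throttled_time */
	while (fscanf(f, "%63s %llu", key, &val) == 2)
		printf("%s = %llu%s\n", key, val,
		       !strcmp(key, "throttled_time") ? " (ns)" : "");

	fclose(f);
	return 0;
}

Repeated reads should show nr_periods, nr_throttled and throttled_time growing as the group keeps hitting its quota; throttled_time accumulates the rq->clock delta recorded between throttle_cfs_rq() and unthrottle_cfs_rq() in the diff below.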
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f9f671a7d0af..d201f28c1de7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1506,6 +1506,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	rq->nr_running -= task_delta;
 
 	cfs_rq->throttled = 1;
+	cfs_rq->throttled_timestamp = rq->clock;
 	raw_spin_lock(&cfs_b->lock);
 	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
 	raw_spin_unlock(&cfs_b->lock);
@@ -1523,8 +1524,10 @@ static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	cfs_rq->throttled = 0;
 	raw_spin_lock(&cfs_b->lock);
+	cfs_b->throttled_time += rq->clock - cfs_rq->throttled_timestamp;
 	list_del_rcu(&cfs_rq->throttled_list);
 	raw_spin_unlock(&cfs_b->lock);
+	cfs_rq->throttled_timestamp = 0;
 
 	update_rq_clock(rq);
 	/* update hierarchical throttle state */
@@ -1612,6 +1615,7 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
 	/* idle depends on !throttled (for the case of a large deficit) */
 	idle = cfs_b->idle && !throttled;
+	cfs_b->nr_periods += overrun;
 
 	/* if we're going inactive then everything else can be deferred */
 	if (idle)
@@ -1625,6 +1629,9 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 		goto out_unlock;
 	}
 
+	/* account preceding periods in which throttling occurred */
+	cfs_b->nr_throttled += overrun;
+
 	/*
 	 * There are throttled entities so we must first use the new bandwidth
 	 * to unthrottle them before making it generally available. This