author	Nikhil Rao <ncrao@google.com>	2011-07-21 12:43:40 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-08-14 06:03:49 -0400
commit	e8da1b18b32064c43881bceef0f051c2110c9ab9 (patch)
tree	8d6e9e5713c1da4cee3ceba1da67df6cab22ab9f /kernel
parent	d3d9dc3302368269acf94b7381663b93000fe2fe (diff)
sched: Add exports tracking cfs bandwidth control statistics
This change introduces statistics exports for the cpu sub-system. These are
added through the use of a stat file similar to that exported by other
subsystems.

The following exports are included:

nr_periods:	number of periods in which execution occurred
nr_throttled:	the number of periods above in which execution was throttled
throttled_time:	cumulative wall-time that any cpus have been throttled for
		this group

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Nikhil Rao <ncrao@google.com>
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184758.198901931@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
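For illustration, a minimal userspace sketch of consuming these exports,
assuming a v1 cpu-cgroup hierarchy mounted at /sys/fs/cgroup/cpu and a group
named "mygroup" (both paths are hypothetical; the file itself is the cpu.stat
entry added by this patch):

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical path; depends on where the cpu cgroup is mounted. */
	#define CPU_STAT_PATH "/sys/fs/cgroup/cpu/mygroup/cpu.stat"

	int main(void)
	{
		char key[64];
		long long val;
		FILE *f = fopen(CPU_STAT_PATH, "r");

		if (!f) {
			perror("fopen");
			return EXIT_FAILURE;
		}

		/* Each line is "<key> <value>", e.g. "nr_periods 42". */
		while (fscanf(f, "%63s %lld", key, &val) == 2)
			printf("%s = %lld\n", key, val);

		fclose(f);
		return EXIT_SUCCESS;
	}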
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	21
-rw-r--r--	kernel/sched_fair.c	7
2 files changed, 28 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 397317248ddd..35c91859f8a6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -262,6 +262,9 @@ struct cfs_bandwidth {
 	struct hrtimer period_timer;
 	struct list_head throttled_cfs_rq;
 
+	/* statistics */
+	int nr_periods, nr_throttled;
+	u64 throttled_time;
 #endif
 };
 
@@ -402,6 +405,7 @@ struct cfs_rq {
 	u64 runtime_expires;
 	s64 runtime_remaining;
 
+	u64 throttled_timestamp;
 	int throttled, throttle_count;
 	struct list_head throttled_list;
 #endif
@@ -9397,6 +9401,19 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
 
 	return ret;
 }
+
+static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
+		struct cgroup_map_cb *cb)
+{
+	struct task_group *tg = cgroup_tg(cgrp);
+	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+
+	cb->fill(cb, "nr_periods", cfs_b->nr_periods);
+	cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
+	cb->fill(cb, "throttled_time", cfs_b->throttled_time);
+
+	return 0;
+}
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
@@ -9443,6 +9460,10 @@ static struct cftype cpu_files[] = {
 		.read_u64 = cpu_cfs_period_read_u64,
 		.write_u64 = cpu_cfs_period_write_u64,
 	},
+	{
+		.name = "stat",
+		.read_map = cpu_stats_show,
+	},
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
 	{
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f9f671a7d0af..d201f28c1de7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1506,6 +1506,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	rq->nr_running -= task_delta;
 
 	cfs_rq->throttled = 1;
+	cfs_rq->throttled_timestamp = rq->clock;
 	raw_spin_lock(&cfs_b->lock);
 	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
 	raw_spin_unlock(&cfs_b->lock);
@@ -1523,8 +1524,10 @@ static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	cfs_rq->throttled = 0;
 	raw_spin_lock(&cfs_b->lock);
+	cfs_b->throttled_time += rq->clock - cfs_rq->throttled_timestamp;
 	list_del_rcu(&cfs_rq->throttled_list);
 	raw_spin_unlock(&cfs_b->lock);
+	cfs_rq->throttled_timestamp = 0;
 
 	update_rq_clock(rq);
 	/* update hierarchical throttle state */
@@ -1612,6 +1615,7 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
 	/* idle depends on !throttled (for the case of a large deficit) */
 	idle = cfs_b->idle && !throttled;
+	cfs_b->nr_periods += overrun;
 
 	/* if we're going inactive then everything else can be deferred */
 	if (idle)
@@ -1625,6 +1629,9 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 		goto out_unlock;
 	}
 
+	/* account preceding periods in which throttling occurred */
+	cfs_b->nr_throttled += overrun;
+
 	/*
 	 * There are throttled entities so we must first use the new bandwidth
 	 * to unthrottle them before making it generally available. This
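To summarize the accounting this patch introduces: throttled_timestamp
records rq->clock when a cfs_rq is throttled, and the wall-clock delta is
folded into cfs_b->throttled_time at unthrottle; nr_periods and nr_throttled
both advance by overrun, so periods missed by a late period timer are still
counted. A minimal standalone model of that scheme (illustrative C only, not
kernel code; time is modeled as a plain nanosecond counter):

	#include <stdio.h>

	/* Names mirror the kernel structs, but this is a userspace model. */
	struct cfs_bandwidth {
		int nr_periods, nr_throttled;
		unsigned long long throttled_time;
	};

	struct cfs_rq {
		int throttled;
		unsigned long long throttled_timestamp;
	};

	static void throttle(struct cfs_rq *rq, unsigned long long now)
	{
		rq->throttled = 1;
		rq->throttled_timestamp = now;	/* stamp start of the stall */
	}

	static void unthrottle(struct cfs_rq *rq, struct cfs_bandwidth *b,
			       unsigned long long now)
	{
		rq->throttled = 0;
		b->throttled_time += now - rq->throttled_timestamp;
		rq->throttled_timestamp = 0;
	}

	/* One period-timer expiry; overrun > 1 means the timer fired late
	 * and several periods elapsed, all accounted at once. */
	static void period_timer(struct cfs_bandwidth *b, int overrun,
				 int throttled)
	{
		b->nr_periods += overrun;
		if (throttled)
			b->nr_throttled += overrun;
	}

	int main(void)
	{
		struct cfs_bandwidth b = { 0, 0, 0 };
		struct cfs_rq rq = { 0, 0 };

		throttle(&rq, 1000);		/* throttled at t = 1000ns */
		period_timer(&b, 1, rq.throttled);
		unthrottle(&rq, &b, 5000);	/* runs again at t = 5000ns */
		period_timer(&b, 1, rq.throttled);

		printf("nr_periods %d\nnr_throttled %d\nthrottled_time %llu\n",
		       b.nr_periods, b.nr_throttled, b.throttled_time);
		return 0;
	}

Running the model prints nr_periods 2, nr_throttled 1, throttled_time 4000:
one throttled period out of two, and a 4000ns cumulative stall.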