author		Paul Turner <pjt@google.com>	2011-07-21 12:43:39 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-08-14 06:03:47 -0400
commit		d3d9dc3302368269acf94b7381663b93000fe2fe (patch)
tree		4659e1f9efdea0f15ab5ef551c718416fb8e7954 /kernel
parent		8cb120d3e41a0464a559d639d519cef563717a4e (diff)
sched: Throttle entities exceeding their allowed bandwidth
With the machinery in place to throttle and unthrottle entities, as well as handle their participation (or lack thereof), we can now enable throttling.

There are two points at which we must check whether it is time to set the throttled state: put_prev_entity() and enqueue_entity().

- put_prev_entity() is the typical throttle path: we reach it by exceeding our allocated run-time within update_curr()->account_cfs_rq_runtime() and going through a reschedule.

- enqueue_entity() covers the case of a wake-up into an already throttled group. In this case we know the group cannot be on_rq and can throttle immediately.

Checks are added at the time of put_prev_entity() and enqueue_entity().

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184758.091415417@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_fair.c	52
1 file changed, 50 insertions(+), 2 deletions(-)
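To see the two decision points in isolation, here is a minimal user-space sketch of the control flow this patch introduces. It is not the kernel code: struct cfs_rq is reduced to the handful of fields the throttle decision needs, the helpers only mimic their kernel counterparts, and the quota-refill step is elided. The real hunks follow in the diff below.

/*
 * Toy model of the two throttle points: check_enqueue_throttle() for a
 * wake-up into a possibly exhausted group, check_cfs_rq_runtime() for the
 * put_prev_entity() path. Field names mirror the patch; everything else
 * is simplified for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct cfs_rq {
	bool runtime_enabled;	/* bandwidth control active for this group */
	bool throttled;		/* group already taken off the runqueue */
	bool has_curr;		/* stands in for cfs_rq->curr != NULL */
	long runtime_remaining;	/* local runtime left, in ns */
};

static bool cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
	return cfs_rq->throttled;
}

static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
{
	cfs_rq->throttled = true;
	printf("throttling group (runtime_remaining=%ld)\n",
	       cfs_rq->runtime_remaining);
}

/* charge delta_exec; the real code would also try to refill from the global pool */
static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, long delta_exec)
{
	if (!cfs_rq->runtime_enabled)
		return;
	cfs_rq->runtime_remaining -= delta_exec;
}

/* wake-up path: the group may have exhausted its quota while idle */
static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
{
	/* an active group is handled by the update_curr()->put() path */
	if (!cfs_rq->runtime_enabled || cfs_rq->has_curr)
		return;
	if (cfs_rq_throttled(cfs_rq))
		return;
	account_cfs_rq_runtime(cfs_rq, 0);
	if (cfs_rq->runtime_remaining <= 0)
		throttle_cfs_rq(cfs_rq);
}

/* put_prev path: throttle once the group has run through its allocation */
static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)
		return;
	if (cfs_rq_throttled(cfs_rq))
		return;
	throttle_cfs_rq(cfs_rq);
}

int main(void)
{
	/* group wakes up with no runtime left: throttled at enqueue time */
	struct cfs_rq woken = { .runtime_enabled = true, .runtime_remaining = 0 };
	check_enqueue_throttle(&woken);

	/* running group overruns its quota: throttled when it is put back */
	struct cfs_rq running = { .runtime_enabled = true, .has_curr = true,
				  .runtime_remaining = 1000 };
	account_cfs_rq_runtime(&running, 5000);	/* ran 5us, had 1us left */
	check_cfs_rq_runtime(&running);
	return 0;
}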
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 1d4acbea9e60..f9f671a7d0af 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -970,6 +970,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	se->vruntime = vruntime;
 }
 
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
+
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -999,8 +1001,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;
 
-	if (cfs_rq->nr_running == 1)
+	if (cfs_rq->nr_running == 1) {
 		list_add_leaf_cfs_rq(cfs_rq);
+		check_enqueue_throttle(cfs_rq);
+	}
 }
 
 static void __clear_buddies_last(struct sched_entity *se)
@@ -1202,6 +1206,8 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 	return se;
 }
 
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+
 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 {
 	/*
@@ -1211,6 +1217,9 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 	if (prev->on_rq)
 		update_curr(cfs_rq);
 
+	/* throttle cfs_rqs exceeding runtime */
+	check_cfs_rq_runtime(cfs_rq);
+
 	check_spread(cfs_rq, prev);
 	if (prev->on_rq) {
 		update_stats_wait_start(cfs_rq, prev);
@@ -1464,7 +1473,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
 	return 0;
 }
 
-static __used void throttle_cfs_rq(struct cfs_rq *cfs_rq)
+static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
@@ -1657,9 +1666,48 @@ out_unlock:
 
 	return idle;
 }
+
+/*
+ * When a group wakes up we want to make sure that its quota is not already
+ * expired/exceeded, otherwise it may be allowed to steal additional ticks of
+ * runtime as update_curr() throttling cannot trigger until it's on-rq.
+ */
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
+{
+	/* an active group must be handled by the update_curr()->put() path */
+	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
+		return;
+
+	/* ensure the group is not already throttled */
+	if (cfs_rq_throttled(cfs_rq))
+		return;
+
+	/* update runtime allocation */
+	account_cfs_rq_runtime(cfs_rq, 0);
+	if (cfs_rq->runtime_remaining <= 0)
+		throttle_cfs_rq(cfs_rq);
+}
+
+/* conditionally throttle active cfs_rq's from put_prev_entity() */
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+{
+	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
+		return;
+
+	/*
+	 * it's possible for a throttled entity to be forced into a running
+	 * state (e.g. set_curr_task), in this case we're finished.
+	 */
+	if (cfs_rq_throttled(cfs_rq))
+		return;
+
+	throttle_cfs_rq(cfs_rq);
+}
 #else
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 		unsigned long delta_exec) {}
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 {