Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--   kernel/sched_fair.c   52
1 file changed, 50 insertions(+), 2 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 1d4acbea9e60..f9f671a7d0af 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -970,6 +970,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
         se->vruntime = vruntime;
 }
 
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
+
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -999,8 +1001,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         __enqueue_entity(cfs_rq, se);
         se->on_rq = 1;
 
-        if (cfs_rq->nr_running == 1)
+        if (cfs_rq->nr_running == 1) {
                 list_add_leaf_cfs_rq(cfs_rq);
+                check_enqueue_throttle(cfs_rq);
+        }
 }
 
 static void __clear_buddies_last(struct sched_entity *se)
@@ -1202,6 +1206,8 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
         return se;
 }
 
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+
 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 {
         /*
@@ -1211,6 +1217,9 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
         if (prev->on_rq)
                 update_curr(cfs_rq);
 
+        /* throttle cfs_rqs exceeding runtime */
+        check_cfs_rq_runtime(cfs_rq);
+
         check_spread(cfs_rq, prev);
         if (prev->on_rq) {
                 update_stats_wait_start(cfs_rq, prev);
@@ -1464,7 +1473,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
         return 0;
 }
 
-static __used void throttle_cfs_rq(struct cfs_rq *cfs_rq)
+static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 {
         struct rq *rq = rq_of(cfs_rq);
         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
@@ -1657,9 +1666,48 @@ out_unlock:
 
         return idle;
 }
+
+/*
+ * When a group wakes up we want to make sure that its quota is not already
+ * expired/exceeded, otherwise it may be allowed to steal additional ticks of
+ * runtime as update_curr() throttling can not trigger until it's on-rq.
+ */
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
+{
+        /* an active group must be handled by the update_curr()->put() path */
+        if (!cfs_rq->runtime_enabled || cfs_rq->curr)
+                return;
+
+        /* ensure the group is not already throttled */
+        if (cfs_rq_throttled(cfs_rq))
+                return;
+
+        /* update runtime allocation */
+        account_cfs_rq_runtime(cfs_rq, 0);
+        if (cfs_rq->runtime_remaining <= 0)
+                throttle_cfs_rq(cfs_rq);
+}
+
+/* conditionally throttle active cfs_rq's from put_prev_entity() */
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+{
+        if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
+                return;
+
+        /*
+         * it's possible for a throttled entity to be forced into a running
+         * state (e.g. set_curr_task), in this case we're finished.
+         */
+        if (cfs_rq_throttled(cfs_rq))
+                return;
+
+        throttle_cfs_rq(cfs_rq);
+}
 #else
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
                 unsigned long delta_exec) {}
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 {