author     Paul Turner <pjt@google.com>      2011-07-21 12:43:31 -0400
committer  Ingo Molnar <mingo@elte.hu>       2011-08-14 06:03:28 -0400
commit     58088ad0152ba4b7997388c93d0ca208ec1ece75 (patch)
tree       22d818b745056effc53ee6fa97ee9103548766b5 /kernel/sched_fair.c
parent     ec12cb7f31e28854efae7dd6f9544e0a66379040 (diff)
sched: Add a timer to handle CFS bandwidth refresh
This patch adds a per-task_group timer which handles the refresh of the global
CFS bandwidth pool.
Since the RT pool uses a similar timer, there's some small refactoring to
share this support.
Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184757.277271273@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
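
For orientation: the diff below manipulates a handful of struct cfs_bandwidth fields. The struct itself is defined in kernel/sched.c, outside this diffstat-limited view, so the layout below is a sketch reconstructed from the fields the diff touches; the field comments are editorial annotations, not kernel source.

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;		/* serializes quota/runtime updates */
	ktime_t period;			/* length of each bandwidth period */
	u64 quota;			/* runtime granted per period; RUNTIME_INF = unconstrained */
	u64 runtime;			/* runtime remaining in the current period */

	int idle;			/* did the last period see no consumption? */
	int timer_active;		/* is the period refresh timer queued? */
	struct hrtimer period_timer;	/* fires once per period to refill runtime */
#endif
};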
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c | 40
1 file changed, 37 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9502aa899f73..af73a8a85eef 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1284,9 +1284,16 @@ static void assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 	raw_spin_lock(&cfs_b->lock);
 	if (cfs_b->quota == RUNTIME_INF)
 		amount = min_amount;
-	else if (cfs_b->runtime > 0) {
-		amount = min(cfs_b->runtime, min_amount);
-		cfs_b->runtime -= amount;
+	else {
+		/* ensure bandwidth timer remains active under consumption */
+		if (!cfs_b->timer_active)
+			__start_cfs_bandwidth(cfs_b);
+
+		if (cfs_b->runtime > 0) {
+			amount = min(cfs_b->runtime, min_amount);
+			cfs_b->runtime -= amount;
+			cfs_b->idle = 0;
+		}
 	}
 	raw_spin_unlock(&cfs_b->lock);
 
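
The key change above: even when no runtime remains, a consuming group now makes sure the period timer is active, so quota refresh cannot lapse while there is demand. The __start_cfs_bandwidth() helper belongs to the kernel/sched.c side of this commit, which this diffstat-limited view omits. A sketch of that helper, assuming it reuses the start_bandwidth_timer() support shared with the RT pool (the small refactoring the changelog mentions):

/* requires cfs_b->lock; may drop it to reprogram the timer */
static void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
	/*
	 * The timer may still be in flight: timer_active == 0 becomes
	 * visible before the hrtimer callback finishes running, so make
	 * sure the timer really is re-programmed.
	 */
	while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
		/* drop the lock so the callback can complete */
		raw_spin_unlock(&cfs_b->lock);
		hrtimer_cancel(&cfs_b->period_timer);
		raw_spin_lock(&cfs_b->lock);

		/* someone else restarted the timer while we waited */
		if (cfs_b->timer_active)
			return;
	}

	cfs_b->timer_active = 1;
	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
}

The cancel/retry loop handles the race where the callback has cleared timer_active but has not yet returned; dropping cfs_b->lock around hrtimer_cancel() avoids deadlocking against the callback, which takes the same lock.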
@@ -1315,6 +1322,33 @@ static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 	__account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
 
+/*
+ * Responsible for refilling a task_group's bandwidth and unthrottling its
+ * cfs_rqs as appropriate. If there has been no activity within the last
+ * period the timer is deactivated until scheduling resumes; cfs_b->idle is
+ * used to track this state.
+ */
+static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
+{
+	int idle = 1;
+
+	raw_spin_lock(&cfs_b->lock);
+	/* no need to continue the timer with no bandwidth constraint */
+	if (cfs_b->quota == RUNTIME_INF)
+		goto out_unlock;
+
+	idle = cfs_b->idle;
+	cfs_b->runtime = cfs_b->quota;
+
+	/* mark as potentially idle for the upcoming period */
+	cfs_b->idle = 1;
+out_unlock:
+	if (idle)
+		cfs_b->timer_active = 0;
+	raw_spin_unlock(&cfs_b->lock);
+
+	return idle;
+}
 #else
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 				   unsigned long delta_exec) {}
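
The idle value returned by do_sched_cfs_period_timer() feeds the hrtimer callback that drives it, sched_cfs_period_timer() in kernel/sched.c (also part of this commit, but outside this diffstat-limited view). A sketch of that callback, assuming the standard hrtimer_forward() overrun loop the RT period timer already uses:

static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
{
	struct cfs_bandwidth *cfs_b =
		container_of(timer, struct cfs_bandwidth, period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		/* advance expiry by whole periods; 0 means not yet due */
		overrun = hrtimer_forward(timer, now, cfs_b->period);
		if (!overrun)
			break;

		idle = do_sched_cfs_period_timer(cfs_b, overrun);
	}

	/* an idle pool lets the timer lapse until consumption resumes */
	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

Returning HRTIMER_NORESTART after an idle period is what makes the timer_active re-arm in assign_cfs_rq_runtime() necessary: an idle group's timer lapses, and the first task to consume runtime afterwards restarts it, rather than waking otherwise-idle machines every period.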