diff options
author | Paul Turner <pjt@google.com> | 2011-07-21 12:43:33 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2011-08-14 06:03:34 -0400 |
commit | 85dac906bec3bb41bfaa7ccaa65c4706de5cfdf8 (patch) | |
tree | 5d3f71d409fe002b0dfd2cdfb7ef09a9d5851f66 /kernel/sched.c | |
parent | a9cf55b2861057a213e610da2fec52125439a11d (diff) |
sched: Add support for throttling group entities
Now that consumption is tracked (via update_curr()) we add support to throttle
group entities (and their corresponding cfs_rqs) in the case where there is no
run-time remaining.
Throttled entities are dequeued to prevent scheduling, additionally we mark
them as throttled (using cfs_rq->throttled) to prevent them from becoming
re-enqueued until they are unthrottled. A list of a task_group's throttled
entities is maintained on the cfs_bandwidth structure.
Note: While the machinery for throttling is added in this patch the act of
throttling an entity exceeding its bandwidth is deferred until later within
the series.
Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184757.480608533@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 7 |
1 files changed, 7 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index a2d55144bd9c..044260a9418d 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -260,6 +260,8 @@ struct cfs_bandwidth { | |||
260 | 260 | ||
261 | int idle, timer_active; | 261 | int idle, timer_active; |
262 | struct hrtimer period_timer; | 262 | struct hrtimer period_timer; |
263 | struct list_head throttled_cfs_rq; | ||
264 | |||
263 | #endif | 265 | #endif |
264 | }; | 266 | }; |
265 | 267 | ||
@@ -399,6 +401,9 @@ struct cfs_rq { | |||
399 | int runtime_enabled; | 401 | int runtime_enabled; |
400 | u64 runtime_expires; | 402 | u64 runtime_expires; |
401 | s64 runtime_remaining; | 403 | s64 runtime_remaining; |
404 | |||
405 | int throttled; | ||
406 | struct list_head throttled_list; | ||
402 | #endif | 407 | #endif |
403 | #endif | 408 | #endif |
404 | }; | 409 | }; |
@@ -441,6 +446,7 @@ static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) | |||
441 | cfs_b->quota = RUNTIME_INF; | 446 | cfs_b->quota = RUNTIME_INF; |
442 | cfs_b->period = ns_to_ktime(default_cfs_period()); | 447 | cfs_b->period = ns_to_ktime(default_cfs_period()); |
443 | 448 | ||
449 | INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); | ||
444 | hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 450 | hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
445 | cfs_b->period_timer.function = sched_cfs_period_timer; | 451 | cfs_b->period_timer.function = sched_cfs_period_timer; |
446 | } | 452 | } |
@@ -448,6 +454,7 @@ static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) | |||
448 | static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) | 454 | static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
449 | { | 455 | { |
450 | cfs_rq->runtime_enabled = 0; | 456 | cfs_rq->runtime_enabled = 0; |
457 | INIT_LIST_HEAD(&cfs_rq->throttled_list); | ||
451 | } | 458 | } |
452 | 459 | ||
453 | /* requires cfs_b->lock, may release to reprogram timer */ | 460 | /* requires cfs_b->lock, may release to reprogram timer */ |