author		Mike Galbraith <efault@gmx.de>		2011-04-29 02:36:50 -0400
committer	Ingo Molnar <mingo@elte.hu>		2011-05-16 05:01:17 -0400
commit		61eadef6a9bde9ea62fda724a9cb501ce9bc925a (patch)
tree		52ffdd4251e921681f086ec6c4e14ad6e5f9472d /kernel
parent		3e51e3edfd81bfd9853ad7de91167e4ce33d0fe7 (diff)
sched, rt: Update rq clock when unthrottling of an otherwise idle CPU
If an RT task is awakened while its rt_rq is throttled, the time between
wakeup/enqueue and unthrottle/selection may be accounted as rt_time
if the CPU is idle. Set rq->skip_clock_update negative upon throttle
release to tell put_prev_task() that we need a clock update.
Reported-by: Thomas Giesel <skoe@directbox.com>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1304059010.7472.1.camel@marge.simson.net
Signed-off-by: Ingo Molnar <mingo@elte.hu>
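
To make the mechanism concrete, here is a minimal user-space sketch (not kernel code) of the tri-state protocol this patch gives rq->skip_clock_update. The kernel's surrounding machinery (rq locking, sched_clock_cpu(), the scheduler classes) is stubbed out, and the point where the flag is cleared is simplified for illustration.

	#include <stdio.h>

	/*
	 * Tri-state protocol for skip_clock_update after this patch:
	 *   > 0  a clock update is already pending, skip redundant ones
	 *     0  normal operation, update the clock
	 *   < 0  the clock is stale (an idle CPU was just unthrottled),
	 *        force an update even though prev is not on the runqueue
	 */
	struct rq {
		long long clock;	/* last sampled time */
		int skip_clock_update;	/* tri-state flag, see above */
	};

	static long long fake_clock = 1000;	/* stand-in for sched_clock_cpu() */

	static void update_rq_clock(struct rq *rq)
	{
		if (rq->skip_clock_update > 0)
			return;			/* redundant update, skip it */
		rq->clock = fake_clock;
		rq->skip_clock_update = 0;	/* simplified: the kernel clears
						 * the flag in schedule() */
	}

	/*
	 * put_prev_task(): a dequeued prev normally needs no clock update,
	 * unless the unthrottle path marked the clock stale (< 0).
	 */
	static void put_prev_task(struct rq *rq, int prev_on_rq)
	{
		if (prev_on_rq || rq->skip_clock_update < 0)
			update_rq_clock(rq);
	}

	int main(void)
	{
		struct rq rq = { .clock = 0, .skip_clock_update = 0 };

		/* The rt_rq is throttled; the CPU idles and time passes. */
		fake_clock = 5000;

		/* Unthrottle while rq->curr == rq->idle: flag the stale clock. */
		rq.skip_clock_update = -1;

		/*
		 * The idle task is put. Without the < 0 check, rq.clock would
		 * stay at 0 and the idle time up to task selection could be
		 * charged to the awakened RT task as rt_time.
		 */
		put_prev_task(&rq, 0 /* prev_on_rq */);

		printf("rq.clock = %lld\n", rq.clock);	/* prints 5000 */
		return 0;
	}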
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c		| 6 +++---
-rw-r--r--	kernel/sched_rt.c	| 7 +++++++
2 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f9778c0d91e2..b8b9a7dac9b0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -466,7 +466,7 @@ struct rq {
 	u64 nohz_stamp;
 	unsigned char nohz_balance_kick;
 #endif
-	unsigned int skip_clock_update;
+	int skip_clock_update;
 
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
@@ -652,7 +652,7 @@ static void update_rq_clock(struct rq *rq)
 {
 	s64 delta;
 
-	if (rq->skip_clock_update)
+	if (rq->skip_clock_update > 0)
 		return;
 
 	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
@@ -4127,7 +4127,7 @@ static inline void schedule_debug(struct task_struct *prev)
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	if (prev->on_rq)
+	if (prev->on_rq || rq->skip_clock_update < 0)
 		update_rq_clock(rq);
 	prev->sched_class->put_prev_task(rq, prev);
 }
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 19ecb3127379..0943ed7a4038 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -562,6 +562,13 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
 				rt_rq->rt_throttled = 0;
 				enqueue = 1;
+
+				/*
+				 * Force a clock update if the CPU was idle,
+				 * lest wakeup -> unthrottle time accumulate.
+				 */
+				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
+					rq->skip_clock_update = -1;
 			}
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;