| author | Mike Galbraith <efault@gmx.de> | 2011-04-29 02:36:50 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2011-05-16 05:01:17 -0400 |
| commit | 61eadef6a9bde9ea62fda724a9cb501ce9bc925a (patch) | |
| tree | 52ffdd4251e921681f086ec6c4e14ad6e5f9472d /kernel/sched.c | |
| parent | 3e51e3edfd81bfd9853ad7de91167e4ce33d0fe7 (diff) | |
sched, rt: Update rq clock when unthrottling of an otherwise idle CPU
If an RT task is awakened while its rt_rq is throttled, the time between
wakeup/enqueue and unthrottle/selection may be accounted as rt_time
if the CPU is idle. Set rq->skip_clock_update negative upon throttle
release to tell put_prev_task() that we need a clock update.
Reported-by: Thomas Giesel <skoe@directbox.com>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1304059010.7472.1.camel@marge.simson.net
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 6 |
1 file changed, 3 insertions, 3 deletions
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index f9778c0d91e2..b8b9a7dac9b0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -466,7 +466,7 @@ struct rq {
 	u64 nohz_stamp;
 	unsigned char nohz_balance_kick;
 #endif
-	unsigned int skip_clock_update;
+	int skip_clock_update;
 
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
@@ -652,7 +652,7 @@ static void update_rq_clock(struct rq *rq)
 {
 	s64 delta;
 
-	if (rq->skip_clock_update)
+	if (rq->skip_clock_update > 0)
 		return;
 
 	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
@@ -4127,7 +4127,7 @@ static inline void schedule_debug(struct task_struct *prev)
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	if (prev->on_rq)
+	if (prev->on_rq || rq->skip_clock_update < 0)
 		update_rq_clock(rq);
 	prev->sched_class->put_prev_task(rq, prev);
 }
```
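The hunks above cover only the consumer side of the now tri-state flag: a positive value still means "skip the next clock update", zero is the normal path (update only when the previous task was still on the runqueue), and a negative value, which the throttle-release path sets per the commit message, forces an update even though the previous (idle) task was not on the runqueue. The producer-side hunk lives outside kernel/sched.c and is not shown in this filtered view. Below is a minimal, self-contained userspace sketch of that protocol, not kernel code: `fake_sched_clock()` and the `prev_on_rq` parameter are hypothetical stand-ins, and the flag reset is folded into `put_prev_task()` for brevity, whereas the kernel clears it in the schedule() path.

```c
#include <stdio.h>

/*
 * Illustrative model of the skip_clock_update protocol in this patch:
 *   > 0  skip the next clock update (pre-existing behaviour),
 *   = 0  update normally when the previous task was still on the runqueue,
 *   < 0  force an update even if the previous task was not on the runqueue
 *        (what the unthrottle path requests).
 */
struct rq {
	long long clock;		/* models rq->clock */
	int skip_clock_update;		/* the tri-state flag from this patch */
};

/* stand-in for sched_clock_cpu(): a fake, monotonically increasing clock */
static long long fake_sched_clock(void)
{
	static long long now;

	now += 1000;
	return now;
}

static void update_rq_clock(struct rq *rq)
{
	if (rq->skip_clock_update > 0)	/* only a positive value skips */
		return;
	rq->clock = fake_sched_clock();
}

static void put_prev_task(struct rq *rq, int prev_on_rq)
{
	/* a negative flag forces the update even for a dequeued prev task */
	if (prev_on_rq || rq->skip_clock_update < 0)
		update_rq_clock(rq);

	/* simplification: consume the request here; the kernel resets the
	 * flag elsewhere in the schedule() path */
	rq->skip_clock_update = 0;
}

int main(void)
{
	struct rq rq = { 0, 0 };

	/* idle CPU: prev (the idle task) is not on_rq, so the clock stays stale */
	put_prev_task(&rq, 0);
	printf("no request:  clock=%lld\n", rq.clock);

	/* throttle release requests a forced update before rescheduling */
	rq.skip_clock_update = -1;
	put_prev_task(&rq, 0);
	printf("forced (-1): clock=%lld\n", rq.clock);

	return 0;
}
```

The type change from `unsigned int` to `int` in struct rq is what makes the negative "update overdue" request possible without adding a second flag.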