author     Dimitri Sivanich <sivanich@sgi.com>    2008-10-31 09:03:41 -0400
committer  Ingo Molnar <mingo@elte.hu>            2008-11-03 05:29:00 -0500
commit     e113a745f693af196c8081b328bf42def086989b (patch)
tree       70d0576dfebdd0207093372b70115776f03bc16e /kernel
parent     45beca08dd8b6d6a65c5ffd730af2eac7a2c7a03 (diff)
sched/rt: small optimization to update_curr_rt()
Impact: micro-optimization to SCHED_FIFO/RR scheduling
A very minor improvement, but might it be better to check sched_rt_runtime(rt_rq)
before taking the rt_runtime_lock?
Peter Zijlstra observes:
> Yes, I think its ok to do so.
>
> Like pointed out in the other thread, there are two races:
>
> - sched_rt_runtime() going to RUNTIME_INF, and that will be handled
> properly by sched_rt_runtime_exceeded()
>
> - sched_rt_runtime() going to !RUNTIME_INF, and here we can miss an
> accounting cycle, but I don't think that is something to worry too
> much about.
Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched_rt.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index d9ba9d5f99d..c7963d5d062 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -537,13 +537,13 @@ static void update_curr_rt(struct rq *rq)
 	for_each_sched_rt_entity(rt_se) {
 		rt_rq = rt_rq_of_se(rt_se);
 
-		spin_lock(&rt_rq->rt_runtime_lock);
 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
+			spin_lock(&rt_rq->rt_runtime_lock);
 			rt_rq->rt_time += delta_exec;
 			if (sched_rt_runtime_exceeded(rt_rq))
 				resched_task(curr);
+			spin_unlock(&rt_rq->rt_runtime_lock);
 		}
-		spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
 
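
For readers who want to try the pattern outside the kernel, below is a minimal
userspace C sketch of the same idea: test a cheap condition before taking the
spinlock, so the common case (no runtime limit, i.e. RUNTIME_INF) never touches
the lock at all. It uses a pthread spinlock and made-up names (runtime, rt_time,
account_exec()); it illustrates the locking pattern only and is not the kernel code.

/* sketch.c - "check before lock" illustration; build with: cc sketch.c -lpthread */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define RUNTIME_INF ((uint64_t)~0ULL)	/* sentinel meaning "no limit", as in the kernel */

static pthread_spinlock_t runtime_lock;	/* stands in for rt_rq->rt_runtime_lock */
static uint64_t runtime = RUNTIME_INF;	/* stands in for sched_rt_runtime(rt_rq) */
static uint64_t rt_time;		/* accumulated time, protected by runtime_lock */

/*
 * Account delta_exec only when a runtime limit is in force.  The test of
 * `runtime` happens before the lock is taken, so the common RUNTIME_INF
 * case pays no locking cost; a concurrent change of `runtime` can at worst
 * skip one accounting pass, mirroring the races discussed in the commit
 * message above.
 */
static void account_exec(uint64_t delta_exec)
{
	if (runtime != RUNTIME_INF) {
		pthread_spin_lock(&runtime_lock);
		rt_time += delta_exec;
		pthread_spin_unlock(&runtime_lock);
	}
}

int main(void)
{
	pthread_spin_init(&runtime_lock, PTHREAD_PROCESS_PRIVATE);

	account_exec(1000);	/* no limit set: returns without locking */
	runtime = 500;
	account_exec(1000);	/* limit set: takes the lock and accounts */

	printf("rt_time = %llu\n", (unsigned long long)rt_time);
	pthread_spin_destroy(&runtime_lock);
	return 0;
}

As in the kernel change, the unlocked read of runtime can race with the limit
being set or cleared; the commit message argues both directions are benign,
since at worst one accounting cycle is missed.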