author		Dmitry Adamushko <dmitry.adamushko@gmail.com>	2008-01-25 15:08:34 -0500
committer	Ingo Molnar <mingo@elte.hu>			2008-01-25 15:08:34 -0500
commit		326587b840785c60f5dc18557235a23bafefd620 (patch)
tree		d96bfb249ca234298a4f2e40a56bce7041523ea6 /kernel/sched_rt.c
parent		e118adef232e637a8f091c1ded2fbf44fcf3ecc8 (diff)
sched: fix goto retry in pick_next_task_rt()
looking at it one more time:

(1) it looks to me that there is no need to call
sched_rt_ratio_exceeded() from pick_next_rt_entity()

- [ for CONFIG_FAIR_GROUP_SCHED ] queues with rt_rq->rt_throttled are
not within this 'tree-like hierarchy' (or whatever we should call it
:-)

- there is also no need to re-check 'rt_rq->rt_time > ratio' at this
point, as 'rt_rq->rt_time' couldn't have increased since the last
call to update_curr_rt() (which obviously calls
sched_rt_ratio_exceeded()); see the sketch below

well, it might be that 'ratio' for this rt_rq has been re-configured
(and the period over which this rt_rq was active has not yet
finished)... but I don't think we should really take this into
account.
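
for reference, roughly how the accounting path looks to me in this
tree (a simplified sketch, not the literal code; the bookkeeping
details are elided): rt_time only grows in update_curr_rt(), which
already does the ratio check itself:

    static void update_curr_rt(struct rq *rq)
    {
            struct task_struct *curr = rq->curr;
            struct rt_rq *rt_rq = ...;      /* the rt_rq curr is queued on */
            u64 delta_exec = rq->clock - curr->se.exec_start;

            ...                             /* runtime/exec_start bookkeeping */

            rt_rq->rt_time += delta_exec;   /* the only place rt_time grows */
            if (sched_rt_ratio_exceeded(rt_rq))
                    resched_task(curr);     /* throttling decided here already */
    }

so by the time pick_next_rt_entity() runs, the throttling decision for
the current period has already been made.
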
(2) now pick_next_rt_entity() must never return NULL, so let's change
pick_next_task_rt() accordingly.
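
the invariant that makes the BUG_ON() safe: pick_next_task_rt() only
enters the pick loop after checking rt_rq->rt_nr_running, so
sched_find_first_bit() always finds a set bit and the picked queue is
non-empty. roughly, the post-patch path (a sketch; the early return is
my reading of the surrounding code, not shown in the hunks below):

    rt_rq = &rq->rt;

    if (unlikely(!rt_rq->rt_nr_running))
            return NULL;                    /* nothing runnable: don't pick */
    ...
    do {
            /*
             * rt_nr_running != 0, so the prio bitmap has a bit set and
             * the corresponding queue is non-empty: rt_se can't be NULL.
             */
            rt_se = pick_next_rt_entity(rq, rt_rq);
            BUG_ON(!rt_se);
            rt_rq = group_rt_rq(rt_se);
    } while (rt_rq);
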
Signed-off-by: Dmitry Adamushko <dmitry.adamushko@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	9
1 files changed, 2 insertions, 7 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 2dac5ebb8bcb..274b40d7bef2 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -476,15 +476,12 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
 	struct list_head *queue;
 	int idx;
 
-	if (sched_rt_ratio_exceeded(rt_rq))
-		goto out;
-
 	idx = sched_find_first_bit(array->bitmap);
 	BUG_ON(idx >= MAX_RT_PRIO);
 
 	queue = array->queue + idx;
 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
-out:
+
 	return next;
 }
 
@@ -494,7 +491,6 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 	struct task_struct *p;
 	struct rt_rq *rt_rq;
 
-retry:
 	rt_rq = &rq->rt;
 
 	if (unlikely(!rt_rq->rt_nr_running))
@@ -505,8 +501,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 
 	do {
 		rt_se = pick_next_rt_entity(rq, rt_rq);
-		if (unlikely(!rt_se))
-			goto retry;
+		BUG_ON(!rt_se);
 		rt_rq = group_rt_rq(rt_se);
 	} while (rt_rq);
 