author		Dmitry Adamushko <dmitry.adamushko@gmail.com>	2008-06-10 18:58:30 -0400
committer	Ingo Molnar <mingo@elte.hu>			2008-06-18 06:41:18 -0400
commit		20b6331bfed1f07ba1e5006889a5d64adc53615e (patch)
tree		9a98f9ccd201c875a94f8a2002f1eddcfc114d65 /kernel/sched_rt.c
parent		f7d62364b2cef85cbcd4feffdd3632ef7c3b61c2 (diff)
sched: rework of "prioritize non-migratable tasks over migratable ones"
Regarding commit 45c01e824991b2dd0a332e19efc4901acb31209f:
I think we can do this more simply. Please take a look at the patch below.
Instead of having 2 separate arrays (which costs ~800 extra bytes on x86_32 and
twice that on x86_64), let's add "exclusive" tasks (the ones that are bound to
this CPU) to the head of the per-priority queue and "shared" ones to the tail.
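(For scale: with MAX_RT_PRIO = 100 priority levels and a struct list_head of
two pointers, a second per-priority array costs roughly 100 * 8 = 800 bytes
with 4-byte pointers and 100 * 16 = 1600 bytes with 8-byte pointers, per
rt_prio_array.)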
When several "exclusive" tasks wake up in a row, they are 'stacked' rather than
queued as before: a task {i+1} is placed in front of the previously woken task
{i}. I don't think this behavior causes any realistic problems.
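To make the resulting ordering concrete, here is a minimal userspace sketch
(an illustration only, not kernel code; the list helpers below are simplified
stand-ins for the <linux/list.h> API). CPU-bound ("exclusive") entities go to
the head of the single per-priority queue via list_add(), migratable ("shared")
ones to the tail via list_add_tail(), so back-to-back exclusive wakeups end up
in LIFO order ahead of the shared ones:

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified userspace stand-ins for the kernel's list helpers. */
	struct list_head { struct list_head *prev, *next; };

	static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

	static void __list_add(struct list_head *n,
			       struct list_head *prev, struct list_head *next)
	{
		next->prev = n;
		n->next = next;
		n->prev = prev;
		prev->next = n;
	}

	/* list_add(): insert right after the head, i.e. at the front of the queue. */
	static void list_add(struct list_head *n, struct list_head *head)
	{
		__list_add(n, head, head->next);
	}

	/* list_add_tail(): insert right before the head, i.e. at the back. */
	static void list_add_tail(struct list_head *n, struct list_head *head)
	{
		__list_add(n, head->prev, head);
	}

	struct entity {
		const char *name;
		int nr_cpus_allowed;
		struct list_head run_list;
	};

	/* Mirrors the reworked enqueue: one queue, CPU-bound entities to the head. */
	static void enqueue(struct list_head *queue, struct entity *e)
	{
		if (e->nr_cpus_allowed == 1)
			list_add(&e->run_list, queue);		/* "exclusive" */
		else
			list_add_tail(&e->run_list, queue);	/* "shared" */
	}

	int main(void)
	{
		struct entity s1 = { "shared-1", 4 }, x1 = { "excl-1", 1 }, x2 = { "excl-2", 1 };
		struct list_head queue, *pos;

		INIT_LIST_HEAD(&queue);
		enqueue(&queue, &s1);
		enqueue(&queue, &x1);
		enqueue(&queue, &x2);	/* stacked in front of excl-1 */

		/* Walk front to back; prints: excl-2 excl-1 shared-1 */
		for (pos = queue.next; pos != &queue; pos = pos->next) {
			struct entity *e = (struct entity *)
				((char *)pos - offsetof(struct entity, run_list));
			printf("%s ", e->name);
		}
		printf("\n");
		return 0;
	}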
There are a couple of changes on top of this one.
(1) in check_preempt_curr_rt()
I don't think the "pick_next_rt_entity(rq, &rq->rt) != &rq->curr->rt" check is
needed. enqueue_task_rt(p) and check_preempt_curr_rt() are always called one
after the other with rq->lock held, so the check
"p->rt.nr_cpus_allowed == 1 && rq->curr->rt.nr_cpus_allowed != 1" (in fact,
just its left part) is enough to guarantee that 'p' has been queued in front
of 'curr'.
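In code terms, the check then reduces to something like the following (a
condensed paraphrase of the hunk further below, not a verbatim excerpt of the
resulting function):

	/*
	 * enqueue_task_rt() has just placed a CPU-bound 'p' at the head of its
	 * priority queue under rq->lock, so p->rt.nr_cpus_allowed == 1 already
	 * implies 'p' sits in front of 'curr'; no pick_next_rt_entity() needed.
	 */
	if (p->prio == rq->curr->prio &&
	    p->rt.nr_cpus_allowed == 1 &&
	    rq->curr->rt.nr_cpus_allowed != 1) {
		cpumask_t mask;

		/* If 'curr' can run elsewhere, reschedule so the push logic may move it. */
		if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
			resched_task(rq->curr);
	}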
(2) in set_cpus_allowed_rt()
I don't think there is a need for requeue_task_rt() here.
Perhaps the only case when a requeue (plus reschedule) might be useful is the
following (see the sketch after this list):
i) weight == 1 && cpu_isset(task_cpu(p), *new_mask)
   (i.e. the task is being bound to this CPU);
ii) 'p' != rq->curr
But in that case 'p' has already been on this CPU for a while and has not been
migrated, i.e. 'rq->curr' would probably not have a high chance of being
migrated right at this moment (although it might a bit later), were we to
allow it to be preempted.
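Purely to make that corner case concrete, here is a hypothetical sketch of the
condition such a requeue would have to test for in set_cpus_allowed_rt() (this
code is not part of the patch; 'weight' is the local cpus_weight(*new_mask)
value and cpu_isset() is the cpumask helper of this kernel era):

	/*
	 * Hypothetical, not in this patch: requeue only when 'p' has just
	 * become bound to this CPU and is not the currently running task.
	 */
	if (weight == 1 && cpu_isset(task_cpu(p), *new_mask) && p != rq->curr)
		requeue_task_rt(rq, p);	/* and possibly resched_task(rq->curr) */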
Anyway, I think we should not make this more complex by trying to address rare
corner cases; that is also why a single-queue approach is preferable. Unless
I'm missing something obvious, this approach gives us similar functionality at
lower cost.
Only compile-tested so far.
(Almost)-Signed-off-by: Dmitry Adamushko <dmitry.adamushko@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	44
1 file changed, 9 insertions(+), 35 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 8ae3416e0bb4..f721b52acd8d 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -576,16 +576,15 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
+	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
 	if (group_rq && rt_rq_throttled(group_rq))
 		return;
 
 	if (rt_se->nr_cpus_allowed == 1)
-		list_add_tail(&rt_se->run_list,
-			      array->xqueue + rt_se_prio(rt_se));
+		list_add(&rt_se->run_list, queue);
 	else
-		list_add_tail(&rt_se->run_list,
-			      array->squeue + rt_se_prio(rt_se));
+		list_add_tail(&rt_se->run_list, queue);
 
 	__set_bit(rt_se_prio(rt_se), array->bitmap);
 
@@ -598,8 +597,7 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 	struct rt_prio_array *array = &rt_rq->active;
 
 	list_del_init(&rt_se->run_list);
-	if (list_empty(array->squeue + rt_se_prio(rt_se))
-	    && list_empty(array->xqueue + rt_se_prio(rt_se)))
+	if (list_empty(array->queue + rt_se_prio(rt_se)))
 		__clear_bit(rt_se_prio(rt_se), array->bitmap);
 
 	dec_rt_tasks(rt_se, rt_rq);
@@ -666,11 +664,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 /*
  * Put task to the end of the run list without the overhead of dequeue
  * followed by enqueue.
- *
- * Note: We always enqueue the task to the shared-queue, regardless of its
- * previous position w.r.t. exclusive vs shared. This is so that exclusive RR
- * tasks fairly round-robin with all tasks on the runqueue, not just other
- * exclusive tasks.
  */
 static
 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
@@ -678,7 +671,7 @@ void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 	struct rt_prio_array *array = &rt_rq->active;
 
 	list_del_init(&rt_se->run_list);
-	list_add_tail(&rt_se->run_list, array->squeue + rt_se_prio(rt_se));
+	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
@@ -736,9 +729,6 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 }
 #endif /* CONFIG_SMP */
 
-static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
-						   struct rt_rq *rt_rq);
-
 /*
  * Preempt the current task with a newly woken task if needed:
  */
@@ -764,8 +754,7 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
 	 */
 	if((p->prio == rq->curr->prio)
 	   && p->rt.nr_cpus_allowed == 1
-	   && rq->curr->rt.nr_cpus_allowed != 1
-	   && pick_next_rt_entity(rq, &rq->rt) != &rq->curr->rt) {
+	   && rq->curr->rt.nr_cpus_allowed != 1) {
 		cpumask_t mask;
 
 		if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
@@ -789,15 +778,8 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
 	idx = sched_find_first_bit(array->bitmap);
 	BUG_ON(idx >= MAX_RT_PRIO);
 
-	queue = array->xqueue + idx;
-	if (!list_empty(queue))
-		next = list_entry(queue->next, struct sched_rt_entity,
-					run_list);
-	else {
-		queue = array->squeue + idx;
-		next = list_entry(queue->next, struct sched_rt_entity,
-					run_list);
-	}
+	queue = array->queue + idx;
+	next = list_entry(queue->next, struct sched_rt_entity, run_list);
 
 	return next;
 }
@@ -867,7 +849,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 			continue;
 		if (next && next->prio < idx)
 			continue;
-		list_for_each_entry(rt_se, array->squeue + idx, run_list) {
+		list_for_each_entry(rt_se, array->queue + idx, run_list) {
 			struct task_struct *p = rt_task_of(rt_se);
 			if (pick_rt_task(rq, p, cpu)) {
 				next = p;
@@ -1249,14 +1231,6 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 		}
 
 		update_rt_migration(rq);
-
-		if (unlikely(weight == 1 || p->rt.nr_cpus_allowed == 1))
-			/*
-			 * If either the new or old weight is a "1", we need
-			 * to requeue to properly move between shared and
-			 * exclusive queues.
-			 */
-			requeue_task_rt(rq, p);
 	}
 
 	p->cpus_allowed = *new_mask;