path: root/kernel/sched_rt.c
author	Dmitry Adamushko <dmitry.adamushko@gmail.com>	2008-07-01 17:32:15 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-18 06:55:14 -0400
commit	7ebefa8ceefed44cc321be70afc54a585a68ac0b (patch)
tree	cab0118d96d704fcca2e52f192f4bb2a7f6a909e /kernel/sched_rt.c
parent	13b40c1e40f3261e83ee514a08b77dbecb93021b (diff)
sched: rework of "prioritize non-migratable tasks over migratable ones"
(1) handle in a generic way all cases when a newly woken-up task is not
    migratable (not just the corner case when "rt_se->nr_cpus_allowed == 1")

(2) if current is to be preempted, then make sure "p" will be picked up by
    pick_next_task_rt(), i.e. move the task's group to the head of its list
    as well.

Currently, that is not the case for the group-scheduling case, as described
here: http://www.ussg.iu.edu/hypermail/linux/kernel/0807.0/0134.html

Signed-off-by: Dmitry Adamushko <dmitry.adamushko@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
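As a rough illustration of the requeue behaviour this patch relies on, here is a minimal user-space sketch (not kernel code): entities are always enqueued at the tail, and check_preempt_equal_prio() requeues a task that has to stay on this CPU at the head of its priority list (requeue_task_rt(rq, p, 1)) so that pick_next_task_rt() finds it first. The node type and list helpers below are simplified stand-ins that only mimic the semantics of the kernel's list_add_tail(), list_move() and list_move_tail(); they are not the <linux/list.h> implementation.

/*
 * Minimal user-space sketch of the head-vs-tail requeue idea.
 * The helpers below only mimic the semantics of the kernel's
 * list_add_tail(), list_move() and list_move_tail().
 */
#include <stdio.h>

struct node {
	const char *name;
	struct node *prev, *next;
};

/* circular doubly-linked list with a dummy head node */
static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

/* insert 'n' right after 'at' */
static void list_insert(struct node *n, struct node *at)
{
	n->next = at->next;
	n->prev = at;
	at->next->prev = n;
	at->next = n;
}

static void list_remove(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* enqueue at the tail: the normal path in __enqueue_rt_entity() */
static void list_add_tail(struct node *n, struct node *head)
{
	list_insert(n, head->prev);
}

/* requeue an already-queued node at the head or the tail */
static void requeue(struct node *n, struct node *head, int at_head)
{
	list_remove(n);
	if (at_head)
		list_insert(n, head);		/* picked first */
	else
		list_insert(n, head->prev);	/* picked last */
}

int main(void)
{
	struct node queue, a = { "A" }, b = { "B" }, woken = { "woken" };
	struct node *n;

	list_init(&queue);
	list_add_tail(&a, &queue);
	list_add_tail(&b, &queue);
	list_add_tail(&woken, &queue);	/* wake-up: enqueue at the tail */

	/*
	 * 'woken' cannot run anywhere else while current can be pushed
	 * away: requeue it at the head so it is picked next.
	 */
	requeue(&woken, &queue, 1);

	for (n = queue.next; n != &queue; n = n->next)
		printf("%s ", n->name);		/* prints: woken A B */
	printf("\n");
	return 0;
}

Built with any C compiler, this prints "woken A B": the requeued entity ends up at the head of its queue while the other entities keep FIFO order at the tail, which is the effect the new head parameter of requeue_rt_entity() has in the patch.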
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c | 68
1 file changed, 40 insertions(+), 28 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 47ceac9e8552..d3d1cccb3d7b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -599,11 +599,7 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
-	if (rt_se->nr_cpus_allowed == 1)
-		list_add(&rt_se->run_list, queue);
-	else
-		list_add_tail(&rt_se->run_list, queue);
-
+	list_add_tail(&rt_se->run_list, queue);
 	__set_bit(rt_se_prio(rt_se), array->bitmap);
 
 	inc_rt_tasks(rt_se, rt_rq);
@@ -688,32 +684,34 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
  * Put task to the end of the run list without the overhead of dequeue
  * followed by enqueue.
  */
-static
-void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+static void
+requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
 {
-	struct rt_prio_array *array = &rt_rq->active;
-
 	if (on_rt_rq(rt_se)) {
-		list_del_init(&rt_se->run_list);
-		list_add_tail(&rt_se->run_list,
-			      array->queue + rt_se_prio(rt_se));
+		struct rt_prio_array *array = &rt_rq->active;
+		struct list_head *queue = array->queue + rt_se_prio(rt_se);
+
+		if (head)
+			list_move(&rt_se->run_list, queue);
+		else
+			list_move_tail(&rt_se->run_list, queue);
 	}
 }
 
-static void requeue_task_rt(struct rq *rq, struct task_struct *p)
+static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 	struct rt_rq *rt_rq;
 
 	for_each_sched_rt_entity(rt_se) {
 		rt_rq = rt_rq_of_se(rt_se);
-		requeue_rt_entity(rt_rq, rt_se);
+		requeue_rt_entity(rt_rq, rt_se, head);
 	}
 }
 
 static void yield_task_rt(struct rq *rq)
 {
-	requeue_task_rt(rq, rq->curr);
+	requeue_task_rt(rq, rq->curr, 0);
 }
 
 #ifdef CONFIG_SMP
@@ -753,6 +751,30 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 	 */
 	return task_cpu(p);
 }
+
+static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
+{
+	cpumask_t mask;
+
+	if (rq->curr->rt.nr_cpus_allowed == 1)
+		return;
+
+	if (p->rt.nr_cpus_allowed != 1
+	    && cpupri_find(&rq->rd->cpupri, p, &mask))
+		return;
+
+	if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
+		return;
+
+	/*
+	 * There appears to be other cpus that can accept
+	 * current and none to run 'p', so lets reschedule
+	 * to try and push current away:
+	 */
+	requeue_task_rt(rq, p, 1);
+	resched_task(rq->curr);
+}
+
 #endif /* CONFIG_SMP */
 
 /*
@@ -778,18 +800,8 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
 	 * to move current somewhere else, making room for our non-migratable
 	 * task.
 	 */
-	if((p->prio == rq->curr->prio)
-	   && p->rt.nr_cpus_allowed == 1
-	   && rq->curr->rt.nr_cpus_allowed != 1) {
-		cpumask_t mask;
-
-		if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
-			/*
-			 * There appears to be other cpus that can accept
-			 * current, so lets reschedule to try and push it away
-			 */
-			resched_task(rq->curr);
-	}
+	if (p->prio == rq->curr->prio && !need_resched())
+		check_preempt_equal_prio(rq, p);
 #endif
 }
 
@@ -1415,7 +1427,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	 * on the queue:
 	 */
 	if (p->rt.run_list.prev != p->rt.run_list.next) {
-		requeue_task_rt(rq, p);
+		requeue_task_rt(rq, p, 0);
 		set_tsk_need_resched(p);
 	}
 }