Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 77 +++++++++++++++++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 49 insertions(+), 28 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 7c9614728c59..f85a76363eee 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -505,7 +505,9 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
         rt_rq->rt_nr_running++;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
         if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
+#ifdef CONFIG_SMP
                 struct rq *rq = rq_of_rt_rq(rt_rq);
+#endif
 
                 rt_rq->highest_prio = rt_se_prio(rt_se);
 #ifdef CONFIG_SMP
@@ -599,11 +601,7 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                 return;
 
-        if (rt_se->nr_cpus_allowed == 1)
-                list_add(&rt_se->run_list, queue);
-        else
-                list_add_tail(&rt_se->run_list, queue);
-
+        list_add_tail(&rt_se->run_list, queue);
         __set_bit(rt_se_prio(rt_se), array->bitmap);
 
         inc_rt_tasks(rt_se, rt_rq);
@@ -688,32 +686,34 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
  * Put task to the end of the run list without the overhead of dequeue
  * followed by enqueue.
  */
-static
-void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+static void
+requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
 {
-        struct rt_prio_array *array = &rt_rq->active;
-
         if (on_rt_rq(rt_se)) {
-                list_del_init(&rt_se->run_list);
-                list_add_tail(&rt_se->run_list,
-                              array->queue + rt_se_prio(rt_se));
+                struct rt_prio_array *array = &rt_rq->active;
+                struct list_head *queue = array->queue + rt_se_prio(rt_se);
+
+                if (head)
+                        list_move(&rt_se->run_list, queue);
+                else
+                        list_move_tail(&rt_se->run_list, queue);
         }
 }
 
-static void requeue_task_rt(struct rq *rq, struct task_struct *p)
+static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
 {
         struct sched_rt_entity *rt_se = &p->rt;
         struct rt_rq *rt_rq;
 
         for_each_sched_rt_entity(rt_se) {
                 rt_rq = rt_rq_of_se(rt_se);
-                requeue_rt_entity(rt_rq, rt_se);
+                requeue_rt_entity(rt_rq, rt_se, head);
         }
 }
 
 static void yield_task_rt(struct rq *rq)
 {
-        requeue_task_rt(rq, rq->curr);
+        requeue_task_rt(rq, rq->curr, 0);
 }
 
 #ifdef CONFIG_SMP
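
Illustrative sketch (not part of the patch): the new head/tail requeue above comes down to where the entity is re-inserted on its per-priority FIFO; list_move() puts it at the front, list_move_tail() at the back, with no separate dequeue/enqueue. The stand-alone userspace model below uses a simplified intrusive list in place of the kernel's <linux/list.h>; the names node, requeue and list_add_head are invented for the sketch.

/* illustrative_requeue.c - userspace model of the head/tail requeue */
#include <stdio.h>

struct node {
        struct node *prev, *next;
        const char *name;               /* stand-in for a sched_rt_entity */
};

static void list_init(struct node *head)
{
        head->prev = head->next = head;
}

static void list_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

static void list_add_head(struct node *n, struct node *head)
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

static void list_add_tail(struct node *n, struct node *head)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

/* delete the entity from wherever it sits, then re-insert at head or tail */
static void requeue(struct node *n, struct node *queue, int head)
{
        list_del(n);
        if (head)
                list_add_head(n, queue);        /* ~ list_move()      */
        else
                list_add_tail(n, queue);        /* ~ list_move_tail() */
}

int main(void)
{
        struct node queue;
        struct node a = { .name = "A" }, b = { .name = "B" }, c = { .name = "C" };
        struct node *p;

        list_init(&queue);
        list_add_tail(&a, &queue);
        list_add_tail(&b, &queue);
        list_add_tail(&c, &queue);

        requeue(&a, &queue, 0);         /* yield/tick path: A moves to the back    */
        requeue(&c, &queue, 1);         /* push-current path: C moves to the front */

        for (p = queue.next; p != &queue; p = p->next)
                printf("%s ", p->name); /* prints: C B A */
        printf("\n");
        return 0;
}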
@@ -753,6 +753,30 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
          */
         return task_cpu(p);
 }
+
+static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
+{
+        cpumask_t mask;
+
+        if (rq->curr->rt.nr_cpus_allowed == 1)
+                return;
+
+        if (p->rt.nr_cpus_allowed != 1
+            && cpupri_find(&rq->rd->cpupri, p, &mask))
+                return;
+
+        if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
+                return;
+
+        /*
+         * There appears to be other cpus that can accept
+         * current and none to run 'p', so lets reschedule
+         * to try and push current away:
+         */
+        requeue_task_rt(rq, p, 1);
+        resched_task(rq->curr);
+}
+
 #endif /* CONFIG_SMP */
 
 /*
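
Illustrative sketch (not part of the patch): check_preempt_equal_prio() above only disturbs current when current is migratable, the newly woken task has nowhere else to run, and cpupri reports another CPU that would take current. The userspace model below keeps just that decision; the boolean p_has_other_cpu / curr_has_other_cpu parameters are invented stand-ins for the two cpupri_find() lookups.

/* illustrative_equal_prio.c - decision logic of check_preempt_equal_prio() */
#include <stdbool.h>
#include <stdio.h>

enum action { DO_NOTHING, PUSH_CURRENT_AWAY };

static enum action equal_prio_decision(int curr_nr_cpus_allowed,
                                       int p_nr_cpus_allowed,
                                       bool p_has_other_cpu,
                                       bool curr_has_other_cpu)
{
        if (curr_nr_cpus_allowed == 1)
                return DO_NOTHING;      /* current cannot be moved anyway   */

        if (p_nr_cpus_allowed != 1 && p_has_other_cpu)
                return DO_NOTHING;      /* push logic can migrate p instead */

        if (!curr_has_other_cpu)
                return DO_NOTHING;      /* nowhere to push current to       */

        /* requeue p at the head of its prio queue and resched current */
        return PUSH_CURRENT_AWAY;
}

int main(void)
{
        /* pinned waker, migratable current with another CPU available: push */
        printf("%d\n", equal_prio_decision(4, 1, false, true));   /* 1 */
        /* both migratable and p can run elsewhere: leave current alone */
        printf("%d\n", equal_prio_decision(4, 4, true, true));    /* 0 */
        return 0;
}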
@@ -778,18 +802,8 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
          * to move current somewhere else, making room for our non-migratable
          * task.
          */
-        if((p->prio == rq->curr->prio)
-           && p->rt.nr_cpus_allowed == 1
-           && rq->curr->rt.nr_cpus_allowed != 1) {
-                cpumask_t mask;
-
-                if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
-                        /*
-                         * There appears to be other cpus that can accept
-                         * current, so lets reschedule to try and push it away
-                         */
-                        resched_task(rq->curr);
-        }
+        if (p->prio == rq->curr->prio && !need_resched())
+                check_preempt_equal_prio(rq, p);
 #endif
 }
 
@@ -922,6 +936,13 @@ static int find_lowest_rq(struct task_struct *task)
                 return -1; /* No targets found */
 
         /*
+         * Only consider CPUs that are usable for migration.
+         * I guess we might want to change cpupri_find() to ignore those
+         * in the first place.
+         */
+        cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
+
+        /*
          * At this point we have built a mask of cpus representing the
          * lowest priority tasks in the system. Now we want to elect
          * the best one based on our affinity and topology.
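
Illustrative sketch (not part of the patch): the added cpus_and() is a plain bitwise AND of cpumasks, intersecting the lowest-priority candidates with cpu_active_map so inactive CPUs are never picked as push targets. Below, plain unsigned longs stand in for cpumask_t.

/* illustrative_active_filter.c - masking candidates against the active map */
#include <stdio.h>

int main(void)
{
        unsigned long lowest_mask = 0x3cUL;     /* CPUs 2-5 run the lowest-prio work */
        unsigned long cpu_active  = 0x0fUL;     /* only CPUs 0-3 are active          */

        /* ~ cpus_and(*lowest_mask, *lowest_mask, cpu_active_map) */
        lowest_mask &= cpu_active;

        printf("usable push targets: 0x%lx\n", lowest_mask);   /* 0xc: CPUs 2 and 3 */
        return 0;
}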
@@ -1415,7 +1436,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
          * on the queue:
          */
         if (p->rt.run_list.prev != p->rt.run_list.next) {
-                requeue_task_rt(rq, p);
+                requeue_task_rt(rq, p, 0);
                 set_tsk_need_resched(p);
         }
 }