Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_rt.c	22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index cc38521c5723..05ada7d44800 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -576,12 +576,9 @@ static void push_rt_tasks(struct rq *rq)
 
 static int pull_rt_task(struct rq *this_rq)
 {
-	struct task_struct *next;
-	struct task_struct *p;
+	int this_cpu = this_rq->cpu, ret = 0, cpu;
+	struct task_struct *p, *next;
 	struct rq *src_rq;
-	int this_cpu = this_rq->cpu;
-	int cpu;
-	int ret = 0;
 
 	/*
 	 * If cpusets are used, and we have overlapping
@@ -608,23 +605,25 @@ static int pull_rt_task(struct rq *this_rq)
 			if (double_lock_balance(this_rq, src_rq)) {
 				/* unlocked our runqueue lock */
 				struct task_struct *old_next = next;
+
 				next = pick_next_task_rt(this_rq);
 				if (next != old_next)
 					ret = 1;
 			}
-			if (likely(src_rq->rt.rt_nr_running <= 1))
+			if (likely(src_rq->rt.rt_nr_running <= 1)) {
 				/*
 				 * Small chance that this_rq->curr changed
 				 * but it's really harmless here.
 				 */
 				rt_clear_overload(this_rq);
-			else
+			} else {
 				/*
 				 * Heh, the src_rq is now overloaded, since
 				 * we already have the src_rq lock, go straight
 				 * to pulling tasks from it.
 				 */
 				goto try_pulling;
+			}
 			spin_unlock(&src_rq->lock);
 			continue;
 		}
@@ -638,6 +637,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 */
 		if (double_lock_balance(this_rq, src_rq)) {
 			struct task_struct *old_next = next;
+
 			next = pick_next_task_rt(this_rq);
 			if (next != old_next)
 				ret = 1;
@@ -674,7 +674,7 @@ static int pull_rt_task(struct rq *this_rq)
 			 */
 			if (p->prio < src_rq->curr->prio ||
 			    (next && next->prio < src_rq->curr->prio))
-				goto bail;
+				goto out;
 
 			ret = 1;
 
@@ -686,9 +686,7 @@ static int pull_rt_task(struct rq *this_rq)
 			 * case there's an even higher prio task
 			 * in another runqueue. (low likelyhood
 			 * but possible)
-			 */
-
-			/*
+			 *
 			 * Update next so that we won't pick a task
 			 * on another cpu with a priority lower (or equal)
 			 * than the one we just picked.
@@ -696,7 +694,7 @@ static int pull_rt_task(struct rq *this_rq)
 			next = p;
 
 		}
- bail:
+ out:
 		spin_unlock(&src_rq->lock);
 	}
 
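The hunks above are purely a style cleanup of pull_rt_task(): declarations are consolidated, the commented if/else arms gain braces, and the "bail" label becomes "out". The fragment below is a minimal, self-contained sketch of that same shape, using a pthread mutex as a stand-in for the runqueue spinlock; try_pull(), src_lock, and all parameters are hypothetical illustration names, not kernel identifiers.

/*
 * Illustrative sketch only: mirrors the control flow of the cleaned-up
 * code (braced if/else arms that carry comments, a single unlock at the
 * "out:" label). Not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t src_lock = PTHREAD_MUTEX_INITIALIZER;

static int try_pull(int nr_running, int prio, int curr_prio)
{
	int ret = 0;

	pthread_mutex_lock(&src_lock);

	if (nr_running <= 1) {
		/*
		 * Only a comment and one statement here, but the braces
		 * keep the commented arms unambiguous -- the same style
		 * change the patch makes around rt_clear_overload().
		 */
		goto out;
	} else {
		/* source looks overloaded: try to pull from it */
		if (prio >= curr_prio)
			goto out;	/* candidate would not preempt */
		ret = 1;		/* "pulled" one task */
	}
out:
	pthread_mutex_unlock(&src_lock);	/* single exit, like "out:" */
	return ret;
}

int main(void)
{
	printf("pulled: %d\n", try_pull(3, 1, 5));	/* expect 1 */
	printf("pulled: %d\n", try_pull(1, 1, 5));	/* expect 0 */
	return 0;
}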