about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched_rt.c33
1 file changed, 0 insertions, 33 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a386758ffebb..9affb3c9d3db 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -586,38 +586,6 @@ static int pull_rt_task(struct rq *this_rq)
 			continue;

 		src_rq = cpu_rq(cpu);
-		if (unlikely(src_rq->rt.rt_nr_running <= 1)) {
-			/*
-			 * It is possible that overlapping cpusets
-			 * will miss clearing a non overloaded runqueue.
-			 * Clear it now.
-			 */
-			if (double_lock_balance(this_rq, src_rq)) {
-				/* unlocked our runqueue lock */
-				struct task_struct *old_next = next;
-
-				next = pick_next_task_rt(this_rq);
-				if (next != old_next)
-					ret = 1;
-			}
-			if (likely(src_rq->rt.rt_nr_running <= 1)) {
-				/*
-				 * Small chance that this_rq->curr changed
-				 * but it's really harmless here.
-				 */
-				rt_clear_overload(this_rq);
-			} else {
-				/*
-				 * Heh, the src_rq is now overloaded, since
-				 * we already have the src_rq lock, go straight
-				 * to pulling tasks from it.
-				 */
-				goto try_pulling;
-			}
-			spin_unlock(&src_rq->lock);
-			continue;
-		}
-
 		/*
 		 * We can potentially drop this_rq's lock in
 		 * double_lock_balance, and another CPU could
@@ -641,7 +609,6 @@ static int pull_rt_task(struct rq *this_rq)
 			continue;
 		}

-	try_pulling:
 		p = pick_next_highest_task_rt(src_rq, this_cpu);

 		/*