diff options
| author | Gregory Haskins <ghaskins@novell.com> | 2008-01-25 15:08:23 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-01-25 15:08:23 -0500 |
| commit | c49443c538c1bbf50eda27e4a3711e9fc15176b0 (patch) | |
| tree | 8dece13f1f5c48422359c981a4ef8bba92136597 | |
| parent | cdc8eb984ce47a7c90a049f45229f7b0d59ba781 (diff) | |
sched: remove some old cpuset logic
We had support for overlapping-cpuset-based RTO (real-time overload) logic in
early prototypes that is no longer used, so remove it.
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| -rw-r--r-- | kernel/sched_rt.c | 33 |
1 files changed, 0 insertions, 33 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index a386758ffebb..9affb3c9d3db 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
| @@ -586,38 +586,6 @@ static int pull_rt_task(struct rq *this_rq) | |||
| 586 | continue; | 586 | continue; |
| 587 | 587 | ||
| 588 | src_rq = cpu_rq(cpu); | 588 | src_rq = cpu_rq(cpu); |
| 589 | if (unlikely(src_rq->rt.rt_nr_running <= 1)) { | ||
| 590 | /* | ||
| 591 | * It is possible that overlapping cpusets | ||
| 592 | * will miss clearing a non overloaded runqueue. | ||
| 593 | * Clear it now. | ||
| 594 | */ | ||
| 595 | if (double_lock_balance(this_rq, src_rq)) { | ||
| 596 | /* unlocked our runqueue lock */ | ||
| 597 | struct task_struct *old_next = next; | ||
| 598 | |||
| 599 | next = pick_next_task_rt(this_rq); | ||
| 600 | if (next != old_next) | ||
| 601 | ret = 1; | ||
| 602 | } | ||
| 603 | if (likely(src_rq->rt.rt_nr_running <= 1)) { | ||
| 604 | /* | ||
| 605 | * Small chance that this_rq->curr changed | ||
| 606 | * but it's really harmless here. | ||
| 607 | */ | ||
| 608 | rt_clear_overload(this_rq); | ||
| 609 | } else { | ||
| 610 | /* | ||
| 611 | * Heh, the src_rq is now overloaded, since | ||
| 612 | * we already have the src_rq lock, go straight | ||
| 613 | * to pulling tasks from it. | ||
| 614 | */ | ||
| 615 | goto try_pulling; | ||
| 616 | } | ||
| 617 | spin_unlock(&src_rq->lock); | ||
| 618 | continue; | ||
| 619 | } | ||
| 620 | |||
| 621 | /* | 589 | /* |
| 622 | * We can potentially drop this_rq's lock in | 590 | * We can potentially drop this_rq's lock in |
| 623 | * double_lock_balance, and another CPU could | 591 | * double_lock_balance, and another CPU could |
| @@ -641,7 +609,6 @@ static int pull_rt_task(struct rq *this_rq) | |||
| 641 | continue; | 609 | continue; |
| 642 | } | 610 | } |
| 643 | 611 | ||
| 644 | try_pulling: | ||
| 645 | p = pick_next_highest_task_rt(src_rq, this_cpu); | 612 | p = pick_next_highest_task_rt(src_rq, this_cpu); |
| 646 | 613 | ||
| 647 | /* | 614 | /* |
