author     Ingo Molnar <mingo@elte.hu>  2008-01-25 15:08:16 -0500
committer  Ingo Molnar <mingo@elte.hu>  2008-01-25 15:08:16 -0500
commit     00597c3ed78e424bdafff123565c078d8b6088cf
tree       9aa1df064152008969f6fa6eacec7f2b15110f75
parent     6e1938d3ad58c940ec4119d387dd92a787cb238c
sched: remove leftover debugging

remove leftover debugging.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/sched_rt.c | 8 --------
 1 file changed, 0 insertions(+), 8 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index deff0c77d705..cc38521c5723 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -253,8 +253,6 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 	struct list_head *queue;
 	int idx;
 
-	assert_spin_locked(&rq->lock);
-
 	if (likely(rq->rt.rt_nr_running < 2))
 		return NULL;
 
@@ -500,8 +498,6 @@ static int push_rt_task(struct rq *rq)
 	int ret = 0;
 	int paranoid = RT_MAX_TRIES;
 
-	assert_spin_locked(&rq->lock);
-
 	if (!rq->rt.overloaded)
 		return 0;
 
@@ -546,8 +542,6 @@ static int push_rt_task(struct rq *rq)
 		goto out;
 	}
 
-	assert_spin_locked(&lowest_rq->lock);
-
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
 	activate_task(lowest_rq, next_task, 0);
@@ -589,8 +583,6 @@ static int pull_rt_task(struct rq *this_rq)
 	int cpu;
 	int ret = 0;
 
-	assert_spin_locked(&this_rq->lock);
-
 	/*
 	 * If cpusets are used, and we have overlapping
 	 * run queue cpusets, then this algorithm may not catch all.
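
For context on what the deleted lines did: assert_spin_locked() is the kernel's
debug check that the caller already holds a given spinlock (on SMP builds of this
era it expanded to BUG_ON(!spin_is_locked(lock))). A minimal sketch of the pattern
the hunks above remove; the condensed struct rq and the function name push_one_task
are hypothetical stand-ins for the real scheduler code:

	#include <linux/spinlock.h>

	/* Hypothetical, condensed runqueue; the real struct rq has many more fields. */
	struct rq {
		spinlock_t lock;
	};

	static int push_one_task(struct rq *rq)	/* stand-in for push_rt_task() */
	{
		/*
		 * Leftover debug check of the kind this commit deletes:
		 * verify rq->lock is held before touching the runqueue.
		 * Once the locking rule is trusted at every call site,
		 * the assertion is pure overhead and can be dropped.
		 */
		assert_spin_locked(&rq->lock);

		/* ... pick a queued task and push it to another CPU ... */
		return 0;
	}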