path: root/kernel/sched_rt.c
author    Steven Rostedt <srostedt@redhat.com>  2008-01-25 15:08:07 -0500
committer Ingo Molnar <mingo@elte.hu>           2008-01-25 15:08:07 -0500
commit    c7a1e46aa9782a947cf2ed506245d43396dbf991 (patch)
tree      1cbf3bc6173b57ea9819c1f0a761347c4285b107 /kernel/sched_rt.c
parent    4642dafdf93dc7d66ee33437b93a5e6b8cea20d2 (diff)
sched: disable standard balancer for RT tasks
Since we now take an active approach to load balancing, we don't need to
balance RT tasks via the normal task balancer. In fact, this code was found
to pull RT tasks away from the CPUs that the active movement had just pushed
them to, resulting in large latencies.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
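To make that failure mode concrete, here is a toy userspace model of the interaction (an illustration only, not kernel code; every identifier in it is invented for this sketch): an active push places a freshly woken RT task on the idle CPU, and a priority-blind generic balancer running on the now-idle source CPU immediately drags it back, which is the needless migration the patch stops by having load_balance_rt() and move_one_task_rt() refuse to move RT tasks.

/*
 * Toy userspace model of the problem described in the commit message.
 * NOT kernel code; all names below are invented for this sketch.
 * Two "CPUs" are modeled only by how many runnable RT tasks they carry.
 */
#include <stdio.h>
#include <stdbool.h>

static int rt_running[2];	/* runnable RT tasks per CPU */

/* Active push (the mechanism this commit relies on): if a CPU has more
 * than one runnable RT task, push the surplus to the other CPU. */
static void active_push(int cpu)
{
	int other = 1 - cpu;

	while (rt_running[cpu] > 1) {
		rt_running[cpu]--;
		rt_running[other]++;
		printf("active push : RT task CPU%d -> CPU%d\n", cpu, other);
	}
}

/* Generic balancer running on an idle CPU: pull a task from the busier
 * CPU.  With rt_off_limits == true it models the patched behaviour,
 * where the RT hooks return 0 and never touch RT tasks. */
static void idle_balance(int this_cpu, bool rt_off_limits)
{
	int busiest = 1 - this_cpu;

	if (rt_off_limits)
		return;			/* "don't touch RT tasks" */
	if (rt_running[busiest] > rt_running[this_cpu]) {
		rt_running[busiest]--;
		rt_running[this_cpu]++;
		printf("generic pull: RT task CPU%d -> CPU%d (needless migration)\n",
		       busiest, this_cpu);
	}
}

static void scenario(bool patched)
{
	/* CPU0 runs one RT task and wakes a second one; CPU1 is idle. */
	rt_running[0] = 2;
	rt_running[1] = 0;
	printf("%s\n", patched ? "-- after the patch --" : "-- before the patch --");

	active_push(0);			/* the active movement places it on CPU1 */
	rt_running[0]--;		/* the task left on CPU0 blocks           */
	idle_balance(0, patched);	/* ...and CPU0 rebalances while idle      */

	printf("final: CPU0=%d, CPU1=%d runnable RT task(s)\n\n",
	       rt_running[0], rt_running[1]);
}

int main(void)
{
	scenario(false);	/* the generic pull undoes the active push */
	scenario(true);		/* placement chosen by the active push sticks */
	return 0;
}

In the first run the task that the active push just placed on CPU1 is pulled straight back to CPU0; in the second run, with the generic hooks stubbed out, the placement chosen by the push sticks.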
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--   kernel/sched_rt.c   95
1 file changed, 4 insertions(+), 91 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index d38a8a559aa5..c492fd2b2eec 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -567,109 +567,22 @@ static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
 		push_rt_tasks(rq);
 }
 
-/*
- * Load-balancing iterator. Note: while the runqueue stays locked
- * during the whole iteration, the current task might be
- * dequeued so the iterator has to be dequeue-safe. Here we
- * achieve that by always pre-iterating before returning
- * the current task:
- */
-static struct task_struct *load_balance_start_rt(void *arg)
-{
-	struct rq *rq = arg;
-	struct rt_prio_array *array = &rq->rt.active;
-	struct list_head *head, *curr;
-	struct task_struct *p;
-	int idx;
-
-	idx = sched_find_first_bit(array->bitmap);
-	if (idx >= MAX_RT_PRIO)
-		return NULL;
-
-	head = array->queue + idx;
-	curr = head->prev;
-
-	p = list_entry(curr, struct task_struct, run_list);
-
-	curr = curr->prev;
-
-	rq->rt.rt_load_balance_idx = idx;
-	rq->rt.rt_load_balance_head = head;
-	rq->rt.rt_load_balance_curr = curr;
-
-	return p;
-}
-
-static struct task_struct *load_balance_next_rt(void *arg)
-{
-	struct rq *rq = arg;
-	struct rt_prio_array *array = &rq->rt.active;
-	struct list_head *head, *curr;
-	struct task_struct *p;
-	int idx;
-
-	idx = rq->rt.rt_load_balance_idx;
-	head = rq->rt.rt_load_balance_head;
-	curr = rq->rt.rt_load_balance_curr;
-
-	/*
-	 * If we arrived back to the head again then
-	 * iterate to the next queue (if any):
-	 */
-	if (unlikely(head == curr)) {
-		int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
-
-		if (next_idx >= MAX_RT_PRIO)
-			return NULL;
-
-		idx = next_idx;
-		head = array->queue + idx;
-		curr = head->prev;
-
-		rq->rt.rt_load_balance_idx = idx;
-		rq->rt.rt_load_balance_head = head;
-	}
-
-	p = list_entry(curr, struct task_struct, run_list);
-
-	curr = curr->prev;
-
-	rq->rt.rt_load_balance_curr = curr;
-
-	return p;
-}
-
 static unsigned long
 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		unsigned long max_load_move,
 		struct sched_domain *sd, enum cpu_idle_type idle,
 		int *all_pinned, int *this_best_prio)
 {
-	struct rq_iterator rt_rq_iterator;
-
-	rt_rq_iterator.start = load_balance_start_rt;
-	rt_rq_iterator.next = load_balance_next_rt;
-	/* pass 'busiest' rq argument into
-	 * load_balance_[start|next]_rt iterators
-	 */
-	rt_rq_iterator.arg = busiest;
-
-	return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
-			idle, all_pinned, this_best_prio, &rt_rq_iterator);
+	/* don't touch RT tasks */
+	return 0;
 }
 
 static int
 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		struct sched_domain *sd, enum cpu_idle_type idle)
 {
-	struct rq_iterator rt_rq_iterator;
-
-	rt_rq_iterator.start = load_balance_start_rt;
-	rt_rq_iterator.next = load_balance_next_rt;
-	rt_rq_iterator.arg = busiest;
-
-	return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
-				&rt_rq_iterator);
+	/* don't touch RT tasks */
+	return 0;
 }
 #else /* CONFIG_SMP */
 # define schedule_tail_balance_rt(rq) do { } while (0)
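A side note on the helpers removed above: their header comment describes a "dequeue-safe" walk achieved by always pre-iterating before returning the current task, i.e. the saved cursor is advanced before the current element is handed to the caller, so the caller may dequeue that element without breaking the iteration. The following stand-alone userspace sketch shows the same pattern on a single circular doubly-linked list (the per-priority bitmap/array of the real code is left out, and all names are invented for the sketch; this is not the kernel's list API).

#include <stdio.h>

struct node {
	struct node *prev, *next;
	int val;
};

/* Minimal circular doubly-linked list with a sentinel head node,
 * in the spirit of the kernel's list_head (names invented here). */
static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct node *head, struct node *n)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* Iterator state, analogous to rt_load_balance_head/_curr above. */
static struct node *it_head, *it_curr;

static struct node *iter_start(struct node *head)
{
	struct node *p;

	if (head->prev == head)
		return NULL;		/* empty list */
	p = head->prev;			/* start from the tail, as the removed code does */
	it_head = head;
	it_curr = p->prev;		/* pre-iterate BEFORE returning p */
	return p;
}

static struct node *iter_next(void)
{
	struct node *p;

	if (it_curr == it_head)
		return NULL;		/* wrapped around: walk is complete */
	p = it_curr;
	it_curr = it_curr->prev;	/* again advance before returning */
	return p;
}

int main(void)
{
	struct node head, n[4];
	struct node *p;
	int i;

	list_init(&head);
	for (i = 0; i < 4; i++) {
		n[i].val = i;
		list_add_tail(&head, &n[i]);
	}

	/* Deleting the element we were just handed does not disturb the
	 * walk, because the cursor already points one element further on. */
	for (p = iter_start(&head); p; p = iter_next()) {
		printf("visiting %d\n", p->val);
		list_del(p);
	}
	return 0;
}

Because iter_start()/iter_next() hand back the element sitting behind the saved cursor, the loop in main() can delete every node it visits without breaking the next step, the same property the removed iterators needed while the balancer dequeued the task it had just been handed.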