author	Gregory Haskins <ghaskins@novell.com>	2008-01-25 15:08:12 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:12 -0500
commit	a22d7fc187ed996b66d8439db27b2303f79a8e7b (patch)
tree	44845eaac2aa44b185d0663d689fea29d94ea5ff /kernel
parent	6e1254d2c41215da27025add8900ed187bca121d (diff)
sched: wake-balance fixes
We have logic to detect whether the system has migratable tasks, but we are
not using it when deciding whether to push tasks away. So we add support
for considering this new information.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	2
-rw-r--r--	kernel/sched_rt.c	10
2 files changed, 10 insertions, 2 deletions
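The detection logic the commit message refers to lives in update_rt_migration(), which the diff below only touches as context. As a rough sketch of how that accounting could drive the new overloaded flag (assuming the rt_nr_migratory/rt_nr_running fields already present in kernel/sched_rt.c; the exact body of update_rt_migration() in this tree may differ):

static void update_rt_migration(struct rq *rq)
{
	/*
	 * Sketch, not part of this patch: a runqueue is "overloaded" when
	 * it has more than one runnable RT task and at least one of them
	 * is allowed to migrate to another CPU.
	 */
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
		if (!rq->rt.overloaded)
			rt_set_overload(rq);	/* per this patch, also sets rq->rt.overloaded = 1 */
	} else if (rq->rt.overloaded) {
		rt_clear_overload(rq);		/* per this patch, also sets rq->rt.overloaded = 0 */
	}
}

Gating the set/clear on a state change keeps rto_count balanced, since both helpers adjust that atomic counter.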
diff --git a/kernel/sched.c b/kernel/sched.c
index 3344ba776b97..c591abd9ca38 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -346,6 +346,7 @@ struct rt_rq {
 	unsigned long rt_nr_migratory;
 	/* highest queued rt task prio */
 	int highest_prio;
+	int overloaded;
 };
 
 /*
@@ -6770,6 +6771,7 @@ void __init sched_init(void)
 		rq->migration_thread = NULL;
 		INIT_LIST_HEAD(&rq->migration_queue);
 		rq->rt.highest_prio = MAX_RT_PRIO;
+		rq->rt.overloaded = 0;
 #endif
 		atomic_set(&rq->nr_iowait, 0);
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a9d7d4408160..87d7b3ff3861 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -16,6 +16,7 @@ static inline cpumask_t *rt_overload(void)
 }
 static inline void rt_set_overload(struct rq *rq)
 {
+	rq->rt.overloaded = 1;
 	cpu_set(rq->cpu, rt_overload_mask);
 	/*
 	 * Make sure the mask is visible before we set
@@ -32,6 +33,7 @@ static inline void rt_clear_overload(struct rq *rq)
 	/* the order here really doesn't matter */
 	atomic_dec(&rto_count);
 	cpu_clear(rq->cpu, rt_overload_mask);
+	rq->rt.overloaded = 0;
 }
 
 static void update_rt_migration(struct rq *rq)
@@ -448,6 +450,9 @@ static int push_rt_task(struct rq *rq)
 
 	assert_spin_locked(&rq->lock);
 
+	if (!rq->rt.overloaded)
+		return 0;
+
 	next_task = pick_next_highest_task_rt(rq, -1);
 	if (!next_task)
 		return 0;
@@ -675,7 +680,7 @@ static void schedule_tail_balance_rt(struct rq *rq)
 	 * the lock was owned by prev, we need to release it
 	 * first via finish_lock_switch and then reaquire it here.
 	 */
-	if (unlikely(rq->rt.rt_nr_running > 1)) {
+	if (unlikely(rq->rt.overloaded)) {
 		spin_lock_irq(&rq->lock);
 		push_rt_tasks(rq);
 		spin_unlock_irq(&rq->lock);
@@ -687,7 +692,8 @@ static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
 {
 	if (unlikely(rt_task(p)) &&
 	    !task_running(rq, p) &&
-	    (p->prio >= rq->curr->prio))
+	    (p->prio >= rq->rt.highest_prio) &&
+	    rq->rt.overloaded)
 		push_rt_tasks(rq);
 }
 