Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a9d7d4408160..87d7b3ff3861 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -16,6 +16,7 @@ static inline cpumask_t *rt_overload(void)
 }
 static inline void rt_set_overload(struct rq *rq)
 {
+	rq->rt.overloaded = 1;
 	cpu_set(rq->cpu, rt_overload_mask);
 	/*
 	 * Make sure the mask is visible before we set
@@ -32,6 +33,7 @@ static inline void rt_clear_overload(struct rq *rq)
 	/* the order here really doesn't matter */
 	atomic_dec(&rto_count);
 	cpu_clear(rq->cpu, rt_overload_mask);
+	rq->rt.overloaded = 0;
 }
 
 static void update_rt_migration(struct rq *rq)
@@ -448,6 +450,9 @@ static int push_rt_task(struct rq *rq)
 
 	assert_spin_locked(&rq->lock);
 
+	if (!rq->rt.overloaded)
+		return 0;
+
 	next_task = pick_next_highest_task_rt(rq, -1);
 	if (!next_task)
 		return 0;
@@ -675,7 +680,7 @@ static void schedule_tail_balance_rt(struct rq *rq)
 	 * the lock was owned by prev, we need to release it
 	 * first via finish_lock_switch and then reaquire it here.
 	 */
-	if (unlikely(rq->rt.rt_nr_running > 1)) {
+	if (unlikely(rq->rt.overloaded)) {
 		spin_lock_irq(&rq->lock);
 		push_rt_tasks(rq);
 		spin_unlock_irq(&rq->lock);
@@ -687,7 +692,8 @@ static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
 {
 	if (unlikely(rt_task(p)) &&
 	    !task_running(rq, p) &&
-	    (p->prio >= rq->curr->prio))
+	    (p->prio >= rq->rt.highest_prio) &&
+	    rq->rt.overloaded)
 		push_rt_tasks(rq);
 }
 
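
Taken together, the patch caches an overload indicator in the per-runqueue RT data (rq->rt.overloaded), keeps it in sync in rt_set_overload()/rt_clear_overload(), and uses it so push_rt_task() and the balance callbacks can bail out before doing any expensive task scanning. The following is a minimal, user-space sketch of that fast-path pattern; the names mirror the kernel's, but the structs, locking, and push logic are deliberately simplified and this is an illustration, not the scheduler code itself.

/*
 * Sketch of the "cache an overloaded flag, check it before the
 * expensive push path" pattern introduced by this patch.
 * Hypothetical stand-alone program, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct rt_rq {
	bool overloaded;	/* more than one runnable RT task queued */
	int rt_nr_running;
};

struct rq {
	struct rt_rq rt;
};

static void rt_set_overload(struct rq *rq)
{
	rq->rt.overloaded = true;
}

static void rt_clear_overload(struct rq *rq)
{
	rq->rt.overloaded = false;
}

/* Returns 1 if a task was (notionally) pushed away, 0 otherwise. */
static int push_rt_task(struct rq *rq)
{
	if (!rq->rt.overloaded)
		return 0;	/* fast path: nothing to push, skip the scan */

	/* ... expensive search for the next-highest pushable task ... */
	printf("scanning for a pushable RT task\n");
	return 1;
}

int main(void)
{
	struct rq rq = { .rt = { .overloaded = false, .rt_nr_running = 1 } };

	push_rt_task(&rq);		/* no-op: runqueue not overloaded */

	rq.rt.rt_nr_running = 2;	/* a second RT task arrives */
	rt_set_overload(&rq);
	push_rt_task(&rq);		/* now the real push work runs */

	rt_clear_overload(&rq);
	return 0;
}

The design point the patch makes is the same as in the sketch: the overloaded state changes rarely (when RT tasks are queued or dequeued), so maintaining a flag at those points lets the hot wakeup and schedule-tail paths test one field instead of re-deriving the condition from rt_nr_running or scanning the runqueue.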