author     Jason Low <jason.low2@hp.com>      2014-04-28 18:45:54 -0400
committer  Ingo Molnar <mingo@kernel.org>     2014-05-07 05:51:36 -0400
commit     0e5b5337f0da073e1f17aec3c322ea7826975d0d (patch)
tree       3f47f8935bb09d489614f5d86421c84f92a40e73 /kernel/sched/fair.c
parent     6ccdc84b81a0a6c09a7f0427761d2f8cecfc2218 (diff)
sched: Fix updating rq->max_idle_balance_cost and rq->next_balance in idle_balance()
The following commit:
e5fc66119ec9 ("sched: Fix race in idle_balance()")
can potentially cause rq->max_idle_balance_cost to not be updated,
even when load_balance(NEWLY_IDLE) is attempted and the per-sd
max cost value is updated.
Preeti noticed a similar issue with updating rq->next_balance.
In this patch, we fix this by making sure we still check/update those values
even if a task gets enqueued while browsing the domains.
Signed-off-by: Jason Low <jason.low2@hp.com>
Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: morten.rasmussen@arm.com
Cc: aswin@hp.com
Cc: daniel.lezcano@linaro.org
Cc: alex.shi@linaro.org
Cc: efault@gmx.de
Cc: vincent.guittot@linaro.org
Link: http://lkml.kernel.org/r/1398725155-7591-2-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c  16
1 file changed, 8 insertions, 8 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7570dd969c28..0fdb96de81a5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6653,6 +6653,7 @@ static int idle_balance(struct rq *this_rq)
 	int this_cpu = this_rq->cpu;
 
 	idle_enter_fair(this_rq);
+
 	/*
 	 * We must set idle_stamp _before_ calling idle_balance(), such that we
 	 * measure the duration of idle_balance() as idle time.
@@ -6705,14 +6706,16 @@ static int idle_balance(struct rq *this_rq)
 
 	raw_spin_lock(&this_rq->lock);
 
+	if (curr_cost > this_rq->max_idle_balance_cost)
+		this_rq->max_idle_balance_cost = curr_cost;
+
 	/*
-	 * While browsing the domains, we released the rq lock.
-	 * A task could have be enqueued in the meantime
+	 * While browsing the domains, we released the rq lock, a task could
+	 * have been enqueued in the meantime. Since we're not going idle,
+	 * pretend we pulled a task.
 	 */
-	if (this_rq->cfs.h_nr_running && !pulled_task) {
+	if (this_rq->cfs.h_nr_running && !pulled_task)
 		pulled_task = 1;
-		goto out;
-	}
 
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
@@ -6722,9 +6725,6 @@ static int idle_balance(struct rq *this_rq)
 		this_rq->next_balance = next_balance;
 	}
 
-	if (curr_cost > this_rq->max_idle_balance_cost)
-		this_rq->max_idle_balance_cost = curr_cost;
-
 out:
 	/* Is there a task of a high priority class? */
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
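
For readability, here is a condensed sketch of the tail of idle_balance() as it looks after this patch. It is not verbatim kernel source and is not compilable on its own: the domain walk that computes curr_cost, next_balance and pulled_task is elided, and the types and helpers used (struct rq, raw_spin_lock(), time_after(), jiffies) come from the kernel-internal headers pulled in by kernel/sched/fair.c. It only restates what the diff above shows: the max_idle_balance_cost update now happens unconditionally after the rq lock is retaken, before the "task got enqueued while we dropped the lock" check, so neither it nor the next_balance update can be skipped by an early exit.

	/*
	 * Sketch of the post-patch tail of idle_balance(); condensed from
	 * the diff above, not the verbatim kernel code.
	 */
	raw_spin_lock(&this_rq->lock);

	/* Always record the cost of this NEWLY_IDLE balance attempt. */
	if (curr_cost > this_rq->max_idle_balance_cost)
		this_rq->max_idle_balance_cost = curr_cost;

	/*
	 * A task may have been enqueued while the rq lock was dropped;
	 * treat that like a successful pull instead of jumping past the
	 * bookkeeping below (the old code did "goto out" here).
	 */
	if (this_rq->cfs.h_nr_running && !pulled_task)
		pulled_task = 1;

	/* rq->next_balance is still refreshed on this path. */
	if (pulled_task || time_after(jiffies, this_rq->next_balance))
		this_rq->next_balance = next_balance;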