author	Daniel Lezcano <daniel.lezcano@linaro.org>	2014-01-17 04:04:03 -0500
committer	Ingo Molnar <mingo@kernel.org>	2014-02-10 10:17:07 -0500
commit	3c4017c13f91069194fce3160944efec50f15a6e (patch)
tree	26c4c75063dcb2a3e0244b4776b76c5a218bbb58 /kernel/sched/fair.c
parent	e5fc66119ec97054eefc83f173a7ee9e133c3c3a (diff)
sched: Move rq->idle_stamp up to the core
idle_balance() modifies the rq->idle_stamp field, making this information
shared across core.c and fair.c. Since the previous patch lets us know
whether the cpu is going to be idle or not, encapsulate the rq->idle_stamp
handling in core.c by moving it up to the caller.

idle_balance() now returns true if a balancing occurred and the cpu won't
be idle, and false if no balance happened and the cpu is going idle.

Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: alex.shi@linaro.org
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1389949444-14821-3-git-send-email-daniel.lezcano@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
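To make the new contract concrete, here is a minimal sketch of how the
caller in core.c can use the return value so that idle_stamp never has to
be touched from fair.c. This is illustrative only: the core.c hunk of this
commit is filtered out of this page (the view is limited to
kernel/sched/fair.c), so the exact surrounding context below is an
assumption, not the verbatim patch.

	/* In __schedule(), kernel/sched/core.c (sketch): */
	if (unlikely(!rq->nr_running)) {
		/*
		 * Stamp the moment the cpu starts going idle; clear it
		 * again if idle_balance() pulled work and the cpu won't
		 * be idle after all.
		 */
		rq->idle_stamp = rq_clock(rq);
		if (idle_balance(rq))
			rq->idle_stamp = 0;
	}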
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5ebc6817c036..04fea7744a9f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6531,7 +6531,7 @@ out:
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-void idle_balance(struct rq *this_rq)
+int idle_balance(struct rq *this_rq)
 {
 	struct sched_domain *sd;
 	int pulled_task = 0;
@@ -6539,10 +6539,8 @@ void idle_balance(struct rq *this_rq)
 	u64 curr_cost = 0;
 	int this_cpu = this_rq->cpu;
 
-	this_rq->idle_stamp = rq_clock(this_rq);
-
 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
-		return;
+		return 0;
 
 	/*
 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
@@ -6580,10 +6578,8 @@ void idle_balance(struct rq *this_rq)
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
 			next_balance = sd->last_balance + interval;
-		if (pulled_task) {
-			this_rq->idle_stamp = 0;
+		if (pulled_task)
 			break;
-		}
 	}
 	rcu_read_unlock();
 
@@ -6594,7 +6590,7 @@ void idle_balance(struct rq *this_rq)
 	 * A task could have be enqueued in the meantime
 	 */
 	if (this_rq->nr_running && !pulled_task)
-		return;
+		return 1;
 
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
@@ -6606,6 +6602,8 @@ void idle_balance(struct rq *this_rq)
 
 	if (curr_cost > this_rq->max_idle_balance_cost)
 		this_rq->max_idle_balance_cost = curr_cost;
+
+	return pulled_task;
 }
 
 /*