aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched
diff options
context:
space:
mode:
authorDaniel Lezcano <daniel.lezcano@linaro.org>2014-01-17 04:04:03 -0500
committerIngo Molnar <mingo@kernel.org>2014-02-10 10:17:07 -0500
commit3c4017c13f91069194fce3160944efec50f15a6e (patch)
tree26c4c75063dcb2a3e0244b4776b76c5a218bbb58 /kernel/sched
parente5fc66119ec97054eefc83f173a7ee9e133c3c3a (diff)
sched: Move rq->idle_stamp up to the core
idle_balance() modifies the rq->idle_stamp field, making this information shared across core.c and fair.c. As we know if the cpu is going to idle or not with the previous patch, let's encapsulate the rq->idle_stamp information in core.c by moving it up to the caller. The idle_balance() function returns true in case a balancing occurred and the cpu won't be idle, false if no balance happened and the cpu is going idle. Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org> Cc: alex.shi@linaro.org Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1389949444-14821-3-git-send-email-daniel.lezcano@linaro.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/core.c11
-rw-r--r--kernel/sched/fair.c14
-rw-r--r--kernel/sched/sched.h2
3 files changed, 16 insertions, 11 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 74dd565c2e1b..417cf657a606 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2704,8 +2704,15 @@ need_resched:
2704 2704
2705 pre_schedule(rq, prev); 2705 pre_schedule(rq, prev);
2706 2706
2707 if (unlikely(!rq->nr_running)) 2707 if (unlikely(!rq->nr_running)) {
2708 idle_balance(rq); 2708 /*
2709 * We must set idle_stamp _before_ calling idle_balance(), such
2710 * that we measure the duration of idle_balance() as idle time.
2711 */
2712 rq->idle_stamp = rq_clock(rq);
2713 if (idle_balance(rq))
2714 rq->idle_stamp = 0;
2715 }
2709 2716
2710 put_prev_task(rq, prev); 2717 put_prev_task(rq, prev);
2711 next = pick_next_task(rq); 2718 next = pick_next_task(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5ebc6817c036..04fea7744a9f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6531,7 +6531,7 @@ out:
6531 * idle_balance is called by schedule() if this_cpu is about to become 6531 * idle_balance is called by schedule() if this_cpu is about to become
6532 * idle. Attempts to pull tasks from other CPUs. 6532 * idle. Attempts to pull tasks from other CPUs.
6533 */ 6533 */
6534void idle_balance(struct rq *this_rq) 6534int idle_balance(struct rq *this_rq)
6535{ 6535{
6536 struct sched_domain *sd; 6536 struct sched_domain *sd;
6537 int pulled_task = 0; 6537 int pulled_task = 0;
@@ -6539,10 +6539,8 @@ void idle_balance(struct rq *this_rq)
6539 u64 curr_cost = 0; 6539 u64 curr_cost = 0;
6540 int this_cpu = this_rq->cpu; 6540 int this_cpu = this_rq->cpu;
6541 6541
6542 this_rq->idle_stamp = rq_clock(this_rq);
6543
6544 if (this_rq->avg_idle < sysctl_sched_migration_cost) 6542 if (this_rq->avg_idle < sysctl_sched_migration_cost)
6545 return; 6543 return 0;
6546 6544
6547 /* 6545 /*
6548 * Drop the rq->lock, but keep IRQ/preempt disabled. 6546 * Drop the rq->lock, but keep IRQ/preempt disabled.
@@ -6580,10 +6578,8 @@ void idle_balance(struct rq *this_rq)
6580 interval = msecs_to_jiffies(sd->balance_interval); 6578 interval = msecs_to_jiffies(sd->balance_interval);
6581 if (time_after(next_balance, sd->last_balance + interval)) 6579 if (time_after(next_balance, sd->last_balance + interval))
6582 next_balance = sd->last_balance + interval; 6580 next_balance = sd->last_balance + interval;
6583 if (pulled_task) { 6581 if (pulled_task)
6584 this_rq->idle_stamp = 0;
6585 break; 6582 break;
6586 }
6587 } 6583 }
6588 rcu_read_unlock(); 6584 rcu_read_unlock();
6589 6585
@@ -6594,7 +6590,7 @@ void idle_balance(struct rq *this_rq)
6594 * A task could have be enqueued in the meantime 6590 * A task could have be enqueued in the meantime
6595 */ 6591 */
6596 if (this_rq->nr_running && !pulled_task) 6592 if (this_rq->nr_running && !pulled_task)
6597 return; 6593 return 1;
6598 6594
6599 if (pulled_task || time_after(jiffies, this_rq->next_balance)) { 6595 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
6600 /* 6596 /*
@@ -6606,6 +6602,8 @@ void idle_balance(struct rq *this_rq)
6606 6602
6607 if (curr_cost > this_rq->max_idle_balance_cost) 6603 if (curr_cost > this_rq->max_idle_balance_cost)
6608 this_rq->max_idle_balance_cost = curr_cost; 6604 this_rq->max_idle_balance_cost = curr_cost;
6605
6606 return pulled_task;
6609} 6607}
6610 6608
6611/* 6609/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 82c0e02f2a58..bb89991ee409 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1158,7 +1158,7 @@ extern const struct sched_class idle_sched_class;
1158extern void update_group_power(struct sched_domain *sd, int cpu); 1158extern void update_group_power(struct sched_domain *sd, int cpu);
1159 1159
1160extern void trigger_load_balance(struct rq *rq); 1160extern void trigger_load_balance(struct rq *rq);
1161extern void idle_balance(struct rq *this_rq); 1161extern int idle_balance(struct rq *this_rq);
1162 1162
1163extern void idle_enter_fair(struct rq *this_rq); 1163extern void idle_enter_fair(struct rq *this_rq);
1164extern void idle_exit_fair(struct rq *this_rq); 1164extern void idle_exit_fair(struct rq *this_rq);