author    Peter Zijlstra <peterz@infradead.org>  2014-02-11 10:11:48 -0500
committer Thomas Gleixner <tglx@linutronix.de>   2014-02-21 15:43:17 -0500
commit    6e83125c6b151afa139c8852c099d6d92954fe3b (patch)
tree      347addb4bc27edcfec493f328aba7531b39ef3f4 /kernel/sched/fair.c
parent    eb7a59b2c888c2518ba2c9d0020343ca71aa9dee (diff)
sched/fair: Remove idle_balance() declaration in sched.h
Remove idle_balance() from the public life; also reduce some #ifdef
clutter by folding the pick_next_task_fair() idle path into
idle_balance().

Cc: mingo@kernel.org
Reported-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140211151148.GP27965@twins.programming.kicks-ass.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c  47
1 file changed, 29 insertions(+), 18 deletions(-)
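
For orientation, below is a minimal stand-alone sketch of the control flow this patch produces; struct rq, rq_clock(), idle_enter_fair() and try_to_pull_tasks() are simplified stand-ins, not the real kernel definitions. pick_next_task_fair() now reaches idle_balance() unconditionally (the !CONFIG_SMP build gets a stub that returns 0), and idle_balance() itself owns the idle_stamp bookkeeping:

	/* Hypothetical, simplified model of the post-patch flow;
	 * compiles with any C compiler. */
	#include <stdio.h>

	struct rq {
		int nr_running;
		unsigned long long idle_stamp;
	};

	static unsigned long long rq_clock(struct rq *rq) { (void)rq; return 1000; }
	static void idle_enter_fair(struct rq *rq) { (void)rq; }

	/* Pretend one task can always be pulled from a busier CPU. */
	static int try_to_pull_tasks(struct rq *rq)
	{
		rq->nr_running = 1;
		return 1;
	}

	/* Post-patch shape: the stamp is set before balancing, so the
	 * balancing time is measured as idle time, and it is cleared
	 * only when a task was actually pulled. */
	static int idle_balance(struct rq *this_rq)
	{
		int pulled_task;

		idle_enter_fair(this_rq);
		this_rq->idle_stamp = rq_clock(this_rq);

		pulled_task = try_to_pull_tasks(this_rq);

		if (pulled_task)
			this_rq->idle_stamp = 0;
		return pulled_task;
	}

	/* The idle path of pick_next_task_fair() shrinks to one call. */
	static struct rq *pick_next_task_fair_idle_path(struct rq *rq)
	{
	again:
		if (rq->nr_running)
			return rq;		/* stand-in for the picked task */

		if (idle_balance(rq))		/* drops rq->lock in the kernel */
			goto again;

		return NULL;
	}

	int main(void)
	{
		struct rq rq = { .nr_running = 0, .idle_stamp = 0 };

		/* An idle CPU pulls a task via idle_balance(), then retries. */
		printf("picked: %s\n",
		       pick_next_task_fair_idle_path(&rq) ? "task" : "none");
		return 0;
	}

The #ifdef CONFIG_SMP block can vanish from pick_next_task_fair() precisely because the !SMP stub added by this patch makes idle_balance() callable everywhere.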
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 280da893cd0f..40c758bbdd57 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2374,13 +2374,13 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
 		se->avg.load_avg_contrib >>= NICE_0_SHIFT;
 	}
 }
-#else
+#else /* CONFIG_FAIR_GROUP_SCHED */
 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
 						 int force_update) {}
 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
 						  struct cfs_rq *cfs_rq) {}
 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 
 static inline void __update_task_entity_contrib(struct sched_entity *se)
 {
@@ -2571,6 +2571,8 @@ void idle_exit_fair(struct rq *this_rq)
 	update_rq_runnable_avg(this_rq, 0);
 }
 
+static int idle_balance(struct rq *this_rq);
+
 #else /* CONFIG_SMP */
 
 static inline void update_entity_load_avg(struct sched_entity *se,
@@ -2584,6 +2586,12 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
 					   int sleep) {}
 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
 					      int force_update) {}
+
+static inline int idle_balance(struct rq *rq)
+{
+	return 0;
+}
+
 #endif /* CONFIG_SMP */
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -4677,7 +4685,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev)
 	struct sched_entity *se;
 	struct task_struct *p;
 
-again: __maybe_unused
+again:
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (!cfs_rq->nr_running)
 		goto idle;
@@ -4775,18 +4783,8 @@ simple:
 	return p;
 
 idle:
-#ifdef CONFIG_SMP
-	idle_enter_fair(rq);
-	/*
-	 * We must set idle_stamp _before_ calling idle_balance(), such that we
-	 * measure the duration of idle_balance() as idle time.
-	 */
-	rq->idle_stamp = rq_clock(rq);
-	if (idle_balance(rq)) { /* drops rq->lock */
-		rq->idle_stamp = 0;
+	if (idle_balance(rq)) /* drops rq->lock */
 		goto again;
-	}
-#endif
 
 	return NULL;
 }
@@ -6634,7 +6632,7 @@ out:
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-int idle_balance(struct rq *this_rq)
+static int idle_balance(struct rq *this_rq)
 {
 	struct sched_domain *sd;
 	int pulled_task = 0;
@@ -6642,8 +6640,15 @@ int idle_balance(struct rq *this_rq)
 	u64 curr_cost = 0;
 	int this_cpu = this_rq->cpu;
 
+	idle_enter_fair(this_rq);
+	/*
+	 * We must set idle_stamp _before_ calling idle_balance(), such that we
+	 * measure the duration of idle_balance() as idle time.
+	 */
+	this_rq->idle_stamp = rq_clock(this_rq);
+
 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
-		return 0;
+		goto out;
 
 	/*
 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
@@ -6692,8 +6697,10 @@ int idle_balance(struct rq *this_rq)
 	 * While browsing the domains, we released the rq lock.
 	 * A task could have be enqueued in the meantime
 	 */
-	if (this_rq->nr_running && !pulled_task)
-		return 1;
+	if (this_rq->nr_running && !pulled_task) {
+		pulled_task = 1;
+		goto out;
+	}
 
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
@@ -6706,6 +6713,10 @@ int idle_balance(struct rq *this_rq)
 	if (curr_cost > this_rq->max_idle_balance_cost)
 		this_rq->max_idle_balance_cost = curr_cost;
 
+out:
+	if (pulled_task)
+		this_rq->idle_stamp = 0;
+
 	return pulled_task;
 }
 
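Why the stamp placement matters: rq->idle_stamp is consumed on the wakeup side, where the elapsed time since the stamp is folded into rq->avg_idle (in kernels of this era that happens in ttwu_do_wakeup(); treat that location as an assumption worth checking). Setting the stamp before any balancing work therefore charges idle_balance() itself as idle time, and clearing it only under the new out: label, when a task was pulled, keeps the accounting in one place. A toy model of the arithmetic, with a smoothing rule that only approximates the kernel's update_avg():

	/* Toy model of the idle_stamp/avg_idle accounting; all values
	 * and the /8 smoothing are illustrative approximations. */
	#include <stdio.h>

	static void update_avg(unsigned long long *avg, unsigned long long sample)
	{
		*avg += (sample - *avg) / 8;	/* kernel uses a >> 3 shift */
	}

	int main(void)
	{
		unsigned long long clock = 1000;	/* rq_clock() stand-in */
		unsigned long long idle_stamp, avg_idle = 0;

		idle_stamp = clock;	/* set _before_ balancing starts ...   */
		clock += 7;		/* ... so balancing cost counts as idle */
		clock += 35;		/* CPU then sits idle until a wakeup    */

		if (idle_stamp) {	/* wakeup side */
			update_avg(&avg_idle, clock - idle_stamp);
			idle_stamp = 0;
		}
		printf("avg_idle = %llu\n", avg_idle);	/* 42/8 -> 5 */
		return 0;
	}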