author    Suresh Siddha <suresh.b.siddha@intel.com>    2011-12-01 20:07:35 -0500
committer Ingo Molnar <mingo@elte.hu>                  2011-12-06 03:06:36 -0500
commit    786d6dc7aeb2bfbfe417507b7beb83919f319db3
tree      b2190cc2e926b2ce27918525443810e1c6a5416f /kernel/sched/fair.c
parent    0b005cf54eac170a8f22540ab096a6e07bf49e7c
sched, nohz: Clean up the find_new_ilb() using sched groups nr_busy_cpus
nr_busy_cpus in the sched_group_power indicates whether the group
is semi-idle or not. This lets us remove the is_semi_idle_group()
helper and simplify find_new_ilb() in the context of finding an
optimal cpu that can do idle load balancing.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20111202010832.656983582@sbsiddha-desk.sc.intel.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
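In short: the patch treats a sched group as an ilb candidate whenever not
all of its CPUs are busy (group_weight != nr_busy_cpus), then picks the
first idle CPU in that group, instead of computing a temporary cpumask.
Below is a minimal standalone C sketch of just that comparison;
struct toy_group, group_has_idle_cpu() and the values used are
illustrative stand-ins, not the kernel's sched_group/sched_group_power
types.

#include <stdio.h>

/* Illustrative stand-in for the sched_group fields the new check reads. */
struct toy_group {
	int group_weight;	/* number of CPUs in the group */
	int nr_busy_cpus;	/* CPUs in the group that are not idle */
};

/*
 * Mirrors the commit's test: a group qualifies as an ilb candidate
 * whenever not every CPU in it is busy (group_weight != nr_busy_cpus),
 * replacing the old cpumask-based is_semi_idle_group() helper.
 */
static int group_has_idle_cpu(const struct toy_group *g)
{
	return g->group_weight != g->nr_busy_cpus;
}

int main(void)
{
	struct toy_group all_busy = { .group_weight = 4, .nr_busy_cpus = 4 };
	struct toy_group mixed    = { .group_weight = 4, .nr_busy_cpus = 2 };

	printf("all_busy candidate? %d\n", group_has_idle_cpu(&all_busy)); /* 0 */
	printf("mixed    candidate? %d\n", group_has_idle_cpu(&mixed));    /* 1 */
	return 0;
}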
Diffstat (limited to 'kernel/sched/fair.c')
 kernel/sched/fair.c | 48 ++++++++++++------------------------------------------
 1 file changed, 12 insertions(+), 36 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 821af14335f3..65a6f8b1bf14 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4733,7 +4733,6 @@ out_unlock:
  */
 static struct {
 	cpumask_var_t idle_cpus_mask;
-	cpumask_var_t grp_idle_mask;
 	atomic_t nr_cpus;
 	unsigned long next_balance;	/* in jiffy units */
 } nohz ____cacheline_aligned;
@@ -4774,33 +4773,6 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
 		(sd && (sd->flags & flag)); sd = sd->parent)
 
 /**
- * is_semi_idle_group - Checks if the given sched_group is semi-idle.
- * @ilb_group: group to be checked for semi-idleness
- *
- * Returns: 1 if the group is semi-idle. 0 otherwise.
- *
- * We define a sched_group to be semi idle if it has atleast one idle-CPU
- * and atleast one non-idle CPU. This helper function checks if the given
- * sched_group is semi-idle or not.
- */
-static inline int is_semi_idle_group(struct sched_group *ilb_group)
-{
-	cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
-					sched_group_cpus(ilb_group));
-
-	/*
-	 * A sched_group is semi-idle when it has atleast one busy cpu
-	 * and atleast one idle cpu.
-	 */
-	if (cpumask_empty(nohz.grp_idle_mask))
-		return 0;
-
-	if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
-		return 0;
-
-	return 1;
-}
-/**
  * find_new_ilb - Finds the optimum idle load balancer for nomination.
  * @cpu:	The cpu which is nominating a new idle_load_balancer.
  *
@@ -4815,8 +4787,8 @@ static inline int is_semi_idle_group(struct sched_group *ilb_group)
 static int find_new_ilb(int cpu)
 {
 	int ilb = cpumask_first(nohz.idle_cpus_mask);
+	struct sched_group *ilbg;
 	struct sched_domain *sd;
-	struct sched_group *ilb_group;
 
 	/*
 	 * Have idle load balancer selection from semi-idle packages only
@@ -4834,23 +4806,28 @@ static int find_new_ilb(int cpu)
 
 	rcu_read_lock();
 	for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
-		ilb_group = sd->groups;
+		ilbg = sd->groups;
 
 		do {
-			if (is_semi_idle_group(ilb_group)) {
-				ilb = cpumask_first(nohz.grp_idle_mask);
+			if (ilbg->group_weight !=
+				atomic_read(&ilbg->sgp->nr_busy_cpus)) {
+				ilb = cpumask_first_and(nohz.idle_cpus_mask,
+							sched_group_cpus(ilbg));
 				goto unlock;
 			}
 
-			ilb_group = ilb_group->next;
+			ilbg = ilbg->next;
 
-		} while (ilb_group != sd->groups);
+		} while (ilbg != sd->groups);
 	}
 unlock:
 	rcu_read_unlock();
 
 out_done:
-	return ilb;
+	if (ilb < nr_cpu_ids && idle_cpu(ilb))
+		return ilb;
+
+	return nr_cpu_ids;
 }
 #else /*  (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
 static inline int find_new_ilb(int call_cpu)
@@ -5588,7 +5565,6 @@ __init void init_sched_fair_class(void)
 
 #ifdef CONFIG_NO_HZ
 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
-	alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
 #endif
 #endif /* SMP */
 
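A note on the last find_new_ilb() hunk above: instead of unconditionally
returning ilb, the function now verifies the candidate with idle_cpu()
and returns nr_cpu_ids as an explicit "no idle load balancer available"
sentinel. The toy C sketch below shows the caller-side pattern such a
sentinel implies; toy_find_new_ilb(), toy_kick() and NR_CPU_IDS are
hypothetical stand-ins, not the kernel's actual nohz code.

#include <stdio.h>

#define NR_CPU_IDS 8	/* stand-in for the kernel's nr_cpu_ids */

/* Toy stand-in returning either a valid CPU id or NR_CPU_IDS. */
static int toy_find_new_ilb(int have_idle_cpu)
{
	return have_idle_cpu ? 3 : NR_CPU_IDS;
}

/* Caller pattern: a return value >= NR_CPU_IDS means "nobody to kick". */
static void toy_kick(int have_idle_cpu)
{
	int ilb = toy_find_new_ilb(have_idle_cpu);

	if (ilb >= NR_CPU_IDS) {
		printf("no idle load balancer, skip kick\n");
		return;
	}
	printf("kick cpu %d\n", ilb);
}

int main(void)
{
	toy_kick(1);	/* kick cpu 3 */
	toy_kick(0);	/* no idle load balancer, skip kick */
	return 0;
}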