author    Preeti U Murthy <preeti@linux.vnet.ibm.com>    2013-10-29 23:12:52 -0400
committer Ingo Molnar <mingo@kernel.org>                 2013-11-06 06:37:55 -0500
commit    37dc6b50cee97954c4e6edcd5b1fa614b76038ee (patch)
tree      f317a2532d134b113c088bdf5026fb4aef1423c4 /kernel
parent    2042abe7977222ef606306faa2dce8fd51e98e65 (diff)
sched: Remove unnecessary iteration over sched domains to update nr_busy_cpus
The nr_busy_cpus parameter is used by nohz_kick_needed() to find out the
number of busy CPUs in a sched domain which has the SD_SHARE_PKG_RESOURCES
flag set. Updating nr_busy_cpus at every level of the sched domain
hierarchy is therefore unnecessary: we can update this parameter only at
the parent domain of the sd which has this flag set. Introduce a per-cpu
parameter sd_busy which represents this parent domain.

In nohz_kick_needed() we directly query the nr_busy_cpus parameter
associated with the groups of sd_busy.

By associating sd_busy with the highest domain which has the
SD_SHARE_PKG_RESOURCES flag set, we cover all lower-level domains which
could have this flag set, and trigger nohz_idle_balancing if any of those
levels have more than one busy CPU.

sd_busy is irrelevant for asymmetric load balancing. However, sd_asym has
been introduced to represent the highest sched domain which has the
SD_ASYM_PACKING flag set, so that it can be queried directly when
required.

While we are at it, also change the nohz_idle parameter to be updated at
the sd_busy domain level alone, rather than at the base domain level of a
CPU. This unifies the concept of busy CPUs at the one level of the sched
domain hierarchy where it is actually used.

Signed-off-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: svaidy@linux.vnet.ibm.com
Cc: vincent.guittot@linaro.org
Cc: bitbucket@online.de
Cc: benh@kernel.crashing.org
Cc: anton@samba.org
Cc: Morten.Rasmussen@arm.com
Cc: pjt@google.com
Cc: peterz@infradead.org
Cc: mikey@neuling.org
Link: http://lkml.kernel.org/r/20131030031252.23426.4417.stgit@preeti.in.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
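To make the win concrete, the sketch below models the before/after
behaviour of set_cpu_sd_state_busy() in user space. It is an illustrative
model only: struct domain and struct group_power are hypothetical
stand-ins for struct sched_domain and struct sched_group_power, and the
busy counter hangs off the domain directly rather than off
sd->groups->sgp.

/* model_sd_busy.c -- user-space model of the nr_busy_cpus update change.
 * Illustrative only; 'domain' and 'group_power' are hypothetical stand-ins
 * for the kernel's sched_domain and sched_group_power structures. */
#include <stdatomic.h>
#include <stdio.h>

struct group_power { atomic_int nr_busy_cpus; };

struct domain {
	struct domain *parent;
	int nohz_idle;
	struct group_power *power;	/* stands in for sd->groups->sgp */
};

/* Pre-patch scheme: walk every domain level, bumping each counter. */
static void set_busy_old(struct domain *sd)
{
	if (!sd || !sd->nohz_idle)
		return;
	sd->nohz_idle = 0;
	for (; sd; sd = sd->parent)
		atomic_fetch_add(&sd->power->nr_busy_cpus, 1);
}

/* Post-patch scheme: touch only the cached sd_busy level. */
static void set_busy_new(struct domain *sd_busy)
{
	if (!sd_busy || !sd_busy->nohz_idle)
		return;
	sd_busy->nohz_idle = 0;
	atomic_fetch_add(&sd_busy->power->nr_busy_cpus, 1);
}

int main(void)
{
	struct group_power p_smt = {0}, p_core = {0}, p_numa = {0};
	struct domain numa = { NULL,  1, &p_numa };
	struct domain core = { &numa, 1, &p_core };	/* plays sd_busy */
	struct domain smt  = { &core, 1, &p_smt  };

	set_busy_old(&smt);	/* one CPU goes busy: three counters touched */
	printf("old: smt=%d core=%d numa=%d\n",
	       atomic_load(&p_smt.nr_busy_cpus),
	       atomic_load(&p_core.nr_busy_cpus),
	       atomic_load(&p_numa.nr_busy_cpus));

	/* Reset the model, then repeat the same transition post-patch. */
	atomic_store(&p_smt.nr_busy_cpus, 0);
	atomic_store(&p_core.nr_busy_cpus, 0);
	atomic_store(&p_numa.nr_busy_cpus, 0);
	core.nohz_idle = 1;

	set_busy_new(&core);	/* same transition: one counter touched */
	printf("new: smt=%d core=%d numa=%d\n",
	       atomic_load(&p_smt.nr_busy_cpus),
	       atomic_load(&p_core.nr_busy_cpus),
	       atomic_load(&p_numa.nr_busy_cpus));
	return 0;
}

Built with cc -std=c11, this prints "old: smt=1 core=1 numa=1" then
"new: smt=0 core=1 numa=0": the old scheme dirties one shared cache line
per domain level on every idle/busy transition, the new one exactly one.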
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c   |  6
-rw-r--r--  kernel/sched/fair.c   | 38
-rw-r--r--  kernel/sched/sched.h  |  2
3 files changed, 28 insertions(+), 18 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index aa066f306be2..1deccd78be98 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4883,6 +4883,8 @@ DEFINE_PER_CPU(struct sched_domain *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
 DEFINE_PER_CPU(struct sched_domain *, sd_numa);
+DEFINE_PER_CPU(struct sched_domain *, sd_busy);
+DEFINE_PER_CPU(struct sched_domain *, sd_asym);
 
 static void update_top_cache_domain(int cpu)
 {
@@ -4894,6 +4896,7 @@ static void update_top_cache_domain(int cpu)
 	if (sd) {
 		id = cpumask_first(sched_domain_span(sd));
 		size = cpumask_weight(sched_domain_span(sd));
+		rcu_assign_pointer(per_cpu(sd_busy, cpu), sd->parent);
 	}
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
@@ -4902,6 +4905,9 @@ static void update_top_cache_domain(int cpu)
 
 	sd = lowest_flag_domain(cpu, SD_NUMA);
 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
+
+	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
+	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
 }
 
 /*
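A note on the pairing above: update_top_cache_domain() publishes sd_busy
and sd_asym with rcu_assign_pointer(), and the fair.c readers that follow
pick them up locklessly with rcu_dereference(). The user-space sketch
below approximates that publish/read contract with C11 release/consume
ordering; this is an assumption made for illustration, since kernel RCU
additionally guarantees that an old domain tree stays valid until all
readers have drained.

/* sketch_publish.c -- user-space approximation of the sd_busy publish/read
 * pairing. sched_domain_model and all names below are hypothetical. */
#include <stdatomic.h>
#include <stddef.h>

struct sched_domain_model {
	int flags;
	struct sched_domain_model *parent;
};

/* One slot per CPU in the kernel (per_cpu(sd_busy, cpu)); one slot here. */
static _Atomic(struct sched_domain_model *) sd_busy_slot;

/* Writer side, cf. update_top_cache_domain(): publish the parent of the
 * highest cache-sharing domain. Release ordering stands in for
 * rcu_assign_pointer(). */
static void publish_sd_busy(struct sched_domain_model *llc)
{
	atomic_store_explicit(&sd_busy_slot, llc ? llc->parent : NULL,
			      memory_order_release);
}

/* Reader side, cf. set_cpu_sd_state_busy(): one dependency-ordered load
 * replaces walking the hierarchy. Stands in for rcu_dereference(). */
static struct sched_domain_model *read_sd_busy(void)
{
	return atomic_load_explicit(&sd_busy_slot, memory_order_consume);
}

int main(void)
{
	struct sched_domain_model mc  = { 0, NULL };
	struct sched_domain_model llc = { 0, &mc };

	publish_sd_busy(&llc);			/* at domain-tree rebuild time */
	return read_sd_busy() == &mc ? 0 : 1;	/* reader sees the parent */
}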
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 074551a792f7..df77c605c7a6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6534,16 +6534,16 @@ static inline void nohz_balance_exit_idle(int cpu)
 static inline void set_cpu_sd_state_busy(void)
 {
 	struct sched_domain *sd;
+	int cpu = smp_processor_id();
 
 	rcu_read_lock();
-	sd = rcu_dereference_check_sched_domain(this_rq()->sd);
+	sd = rcu_dereference(per_cpu(sd_busy, cpu));
 
 	if (!sd || !sd->nohz_idle)
 		goto unlock;
 	sd->nohz_idle = 0;
 
-	for (; sd; sd = sd->parent)
-		atomic_inc(&sd->groups->sgp->nr_busy_cpus);
+	atomic_inc(&sd->groups->sgp->nr_busy_cpus);
 unlock:
 	rcu_read_unlock();
 }
@@ -6551,16 +6551,16 @@ unlock:
 void set_cpu_sd_state_idle(void)
 {
 	struct sched_domain *sd;
+	int cpu = smp_processor_id();
 
 	rcu_read_lock();
-	sd = rcu_dereference_check_sched_domain(this_rq()->sd);
+	sd = rcu_dereference(per_cpu(sd_busy, cpu));
 
 	if (!sd || sd->nohz_idle)
 		goto unlock;
 	sd->nohz_idle = 1;
 
-	for (; sd; sd = sd->parent)
-		atomic_dec(&sd->groups->sgp->nr_busy_cpus);
+	atomic_dec(&sd->groups->sgp->nr_busy_cpus);
 unlock:
 	rcu_read_unlock();
 }
@@ -6767,6 +6767,8 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 {
 	unsigned long now = jiffies;
 	struct sched_domain *sd;
+	struct sched_group_power *sgp;
+	int nr_busy;
 
 	if (unlikely(idle_cpu(cpu)))
 		return 0;
@@ -6792,22 +6794,22 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 		goto need_kick;
 
 	rcu_read_lock();
-	for_each_domain(cpu, sd) {
-		struct sched_group *sg = sd->groups;
-		struct sched_group_power *sgp = sg->sgp;
-		int nr_busy = atomic_read(&sgp->nr_busy_cpus);
+	sd = rcu_dereference(per_cpu(sd_busy, cpu));
 
-		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
-			goto need_kick_unlock;
+	if (sd) {
+		sgp = sd->groups->sgp;
+		nr_busy = atomic_read(&sgp->nr_busy_cpus);
 
-		if (sd->flags & SD_ASYM_PACKING
-		    && (cpumask_first_and(nohz.idle_cpus_mask,
-					  sched_domain_span(sd)) < cpu))
+		if (nr_busy > 1)
 			goto need_kick_unlock;
-
-		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
-			break;
 	}
+
+	sd = rcu_dereference(per_cpu(sd_asym, cpu));
+
+	if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
+				  sched_domain_span(sd)) < cpu))
+		goto need_kick_unlock;
+
 	rcu_read_unlock();
 	return 0;
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4e650acffed7..88c85b21d633 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -623,6 +623,8 @@ DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
+DECLARE_PER_CPU(struct sched_domain *, sd_busy);
+DECLARE_PER_CPU(struct sched_domain *, sd_asym);
 
 struct sched_group_power {
 	atomic_t ref;