Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c  |  6
-rw-r--r--  kernel/sched/fair.c  | 38
-rw-r--r--  kernel/sched/sched.h |  2
3 files changed, 28 insertions, 18 deletions
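
In short, as the hunks below show: rather than walking the sched-domain hierarchy with for_each_domain() on every nohz busy/idle transition and in nohz_kick_needed(), the patch caches two RCU-published per-CPU pointers at topology-update time: sd_busy, the parent of the highest domain that shares last-level-cache resources, and sd_asym, the highest domain with SD_ASYM_PACKING set. The hot paths then do a single rcu_dereference() instead of iterating; small illustrative sketches follow each file's hunks.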
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index aa066f306be2..1deccd78be98 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4883,6 +4883,8 @@ DEFINE_PER_CPU(struct sched_domain *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
 DEFINE_PER_CPU(struct sched_domain *, sd_numa);
+DEFINE_PER_CPU(struct sched_domain *, sd_busy);
+DEFINE_PER_CPU(struct sched_domain *, sd_asym);
 
 static void update_top_cache_domain(int cpu)
 {
@@ -4894,6 +4896,7 @@ static void update_top_cache_domain(int cpu)
 	if (sd) {
 		id = cpumask_first(sched_domain_span(sd));
 		size = cpumask_weight(sched_domain_span(sd));
+		rcu_assign_pointer(per_cpu(sd_busy, cpu), sd->parent);
 	}
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
@@ -4902,6 +4905,9 @@ static void update_top_cache_domain(int cpu)
 
 	sd = lowest_flag_domain(cpu, SD_NUMA);
 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
+
+	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
+	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
 }
 
 /*
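
To make the caching step above concrete, here is a minimal userspace sketch of the walk-then-cache pattern that update_top_cache_domain() now performs. The struct layout, the per-CPU array, and the *_model names are invented for illustration; only the highest_flag_domain()-style walk and the sd->parent caching mirror the patch.

```c
/* Minimal userspace model of the caching done in update_top_cache_domain().
 * The types and the per-CPU array are invented for illustration; only the
 * walk-then-cache pattern mirrors the patch. */
#include <stdio.h>
#include <stddef.h>

#define SD_SHARE_PKG_RESOURCES 0x1
#define NR_CPUS 4

struct sched_domain_model {
	struct sched_domain_model *parent;
	int flags;
	const char *name;
};

static struct sched_domain_model *sd_busy_cache[NR_CPUS];

/* Walk toward the root and remember the highest level with @flag set,
 * as highest_flag_domain() does in the kernel. */
static struct sched_domain_model *
highest_flag_domain(struct sched_domain_model *sd, int flag)
{
	struct sched_domain_model *hsd = NULL;

	for (; sd; sd = sd->parent) {
		if (sd->flags & flag)
			hsd = sd;
	}
	return hsd;
}

/* Done once per topology update: O(hierarchy depth). */
static void update_cache(int cpu, struct sched_domain_model *base)
{
	struct sched_domain_model *sd =
		highest_flag_domain(base, SD_SHARE_PKG_RESOURCES);

	/* sd_busy is the parent of the LLC domain, as in the hunk above. */
	sd_busy_cache[cpu] = sd ? sd->parent : NULL;
}

int main(void)
{
	struct sched_domain_model numa = { NULL, 0, "NUMA" };
	struct sched_domain_model mc   = { &numa, SD_SHARE_PKG_RESOURCES, "MC" };
	struct sched_domain_model smt  = { &mc, SD_SHARE_PKG_RESOURCES, "SMT" };

	update_cache(0, &smt);
	/* The fast path is now one pointer read instead of a loop. */
	printf("cpu0 sd_busy -> %s\n",
	       sd_busy_cache[0] ? sd_busy_cache[0]->name : "none");
	return 0;
}
```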
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 074551a792f7..df77c605c7a6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6534,16 +6534,16 @@ static inline void nohz_balance_exit_idle(int cpu)
 static inline void set_cpu_sd_state_busy(void)
 {
 	struct sched_domain *sd;
+	int cpu = smp_processor_id();
 
 	rcu_read_lock();
-	sd = rcu_dereference_check_sched_domain(this_rq()->sd);
+	sd = rcu_dereference(per_cpu(sd_busy, cpu));
 
 	if (!sd || !sd->nohz_idle)
 		goto unlock;
 	sd->nohz_idle = 0;
 
-	for (; sd; sd = sd->parent)
-		atomic_inc(&sd->groups->sgp->nr_busy_cpus);
+	atomic_inc(&sd->groups->sgp->nr_busy_cpus);
 unlock:
 	rcu_read_unlock();
 }
@@ -6551,16 +6551,16 @@ unlock:
 void set_cpu_sd_state_idle(void)
 {
 	struct sched_domain *sd;
+	int cpu = smp_processor_id();
 
 	rcu_read_lock();
-	sd = rcu_dereference_check_sched_domain(this_rq()->sd);
+	sd = rcu_dereference(per_cpu(sd_busy, cpu));
 
 	if (!sd || sd->nohz_idle)
 		goto unlock;
 	sd->nohz_idle = 1;
 
-	for (; sd; sd = sd->parent)
-		atomic_dec(&sd->groups->sgp->nr_busy_cpus);
+	atomic_dec(&sd->groups->sgp->nr_busy_cpus);
 unlock:
 	rcu_read_unlock();
 }
@@ -6767,6 +6767,8 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 {
 	unsigned long now = jiffies;
 	struct sched_domain *sd;
+	struct sched_group_power *sgp;
+	int nr_busy;
 
 	if (unlikely(idle_cpu(cpu)))
 		return 0;
@@ -6792,22 +6794,22 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 		goto need_kick;
 
 	rcu_read_lock();
-	for_each_domain(cpu, sd) {
-		struct sched_group *sg = sd->groups;
-		struct sched_group_power *sgp = sg->sgp;
-		int nr_busy = atomic_read(&sgp->nr_busy_cpus);
+	sd = rcu_dereference(per_cpu(sd_busy, cpu));
 
-		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
-			goto need_kick_unlock;
+	if (sd) {
+		sgp = sd->groups->sgp;
+		nr_busy = atomic_read(&sgp->nr_busy_cpus);
 
-		if (sd->flags & SD_ASYM_PACKING
-		    && (cpumask_first_and(nohz.idle_cpus_mask,
-					  sched_domain_span(sd)) < cpu))
+		if (nr_busy > 1)
 			goto need_kick_unlock;
-
-		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
-			break;
 	}
+
+	sd = rcu_dereference(per_cpu(sd_asym, cpu));
+
+	if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
+				  sched_domain_span(sd)) < cpu))
+		goto need_kick_unlock;
+
 	rcu_read_unlock();
 	return 0;
 
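
A sketch of why the for_each_domain() loops above collapse to a single atomic operation: after the patch, the nr_busy_cpus count that nohz_kick_needed() consults lives only on the sd_busy domain's group, so each busy/idle transition touches one counter. The userspace model below uses C11 atomics; the shared counter and the cpu_counted flags (standing in for sd->nohz_idle) are invented for illustration.

```c
/* Userspace model of the nr_busy_cpus bookkeeping after this patch.
 * The single shared counter stands in for
 * sd_busy->groups->sgp->nr_busy_cpus; everything else is invented. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int nr_busy_cpus;
static bool cpu_counted[4]; /* models the per-CPU sd->nohz_idle flag */

static void cpu_goes_busy(int cpu)
{
	if (cpu_counted[cpu])
		return;	/* already counted, like the !sd->nohz_idle bailout */
	cpu_counted[cpu] = true;
	atomic_fetch_add(&nr_busy_cpus, 1); /* one atomic op, no domain walk */
}

static void cpu_goes_idle(int cpu)
{
	if (!cpu_counted[cpu])
		return;
	cpu_counted[cpu] = false;
	atomic_fetch_sub(&nr_busy_cpus, 1);
}

/* The kick test reduces to a single read of the cached counter. */
static bool kick_needed(void)
{
	return atomic_load(&nr_busy_cpus) > 1;
}

int main(void)
{
	cpu_goes_busy(0);
	cpu_goes_busy(1);
	printf("kick needed: %d\n", kick_needed()); /* prints 1 */
	cpu_goes_idle(1);
	printf("kick needed: %d\n", kick_needed()); /* prints 0 */
	return 0;
}
```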
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4e650acffed7..88c85b21d633 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -623,6 +623,8 @@ DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
+DECLARE_PER_CPU(struct sched_domain *, sd_busy);
+DECLARE_PER_CPU(struct sched_domain *, sd_asym);
 
 struct sched_group_power {
 	atomic_t ref;
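
One note on the header change: DECLARE_PER_CPU() in sched.h makes the new pointers visible to fair.c, while the matching DEFINE_PER_CPU() lines in core.c allocate them. Every reader must pair rcu_dereference() with the writer's rcu_assign_pointer() and tolerate a NULL pointer, as the new if (!sd) and if (sd) checks do. Below is a hedged userspace stand-in for that publish/read pairing, using C11 release/acquire atomics rather than the kernel's RCU primitives.

```c
/* Userspace stand-in for the rcu_assign_pointer()/rcu_dereference()
 * pairing required by the new per-CPU pointers, modeled with C11
 * release/acquire atomics (an illustrative assumption, not how the
 * kernel implements RCU). */
#include <stdatomic.h>
#include <stdio.h>

struct sched_domain_model { int level; };

static _Atomic(struct sched_domain_model *) sd_busy_model;

/* Writer side, like update_top_cache_domain(): publish with release
 * ordering so the pointed-to data is visible before the pointer is. */
static void publish(struct sched_domain_model *sd)
{
	atomic_store_explicit(&sd_busy_model, sd, memory_order_release);
}

/* Reader side, like set_cpu_sd_state_busy(): an acquire load stands in
 * for rcu_dereference(); readers must tolerate NULL, as the patch does
 * with its "if (!sd)" checks. */
static struct sched_domain_model *lookup(void)
{
	return atomic_load_explicit(&sd_busy_model, memory_order_acquire);
}

int main(void)
{
	static struct sched_domain_model numa = { .level = 2 };

	publish(&numa);
	struct sched_domain_model *sd = lookup();
	printf("sd level: %d\n", sd ? sd->level : -1);
	return 0;
}
```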