aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
 include/linux/sched.h | 1 +
 kernel/sched/core.c   | 2 ++
 kernel/sched/fair.c   | 9 +++++++--
 3 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4a559bf0622f..3cbfb55bde25 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -927,6 +927,7 @@ struct sched_group_power {
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
 	atomic_t ref;
+	int balance_cpu;
 
 	unsigned int group_weight;
 	struct sched_group_power *sgp;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0533a688ce22..6001e5c3b4e4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6060,6 +6060,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
 		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
 		atomic_inc(&sg->sgp->ref);
+		sg->balance_cpu = -1;
 
 		if (cpumask_test_cpu(cpu, sg_span))
 			groups = sg;
@@ -6135,6 +6136,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 
 		cpumask_clear(sched_group_cpus(sg));
 		sg->sgp->power = 0;
+		sg->balance_cpu = -1;
 
 		for_each_cpu(j, span) {
 			if (get_group(j, sdd, NULL) != group)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 968ffee24721..cf86f74bcac2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3828,7 +3828,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	 */
 	if (local_group) {
 		if (idle != CPU_NEWLY_IDLE) {
-			if (balance_cpu != this_cpu) {
+			if (balance_cpu != this_cpu ||
+			    cmpxchg(&group->balance_cpu, -1, balance_cpu) != -1) {
 				*balance = 0;
 				return;
 			}
@@ -4929,7 +4930,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 	int balance = 1;
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long interval;
-	struct sched_domain *sd;
+	struct sched_domain *sd, *last = NULL;
 	/* Earliest time when we have to do rebalance again */
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
@@ -4939,6 +4940,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
+		last = sd;
 		if (!(sd->flags & SD_LOAD_BALANCE))
 			continue;
 
@@ -4983,6 +4985,9 @@ out:
 		if (!balance)
 			break;
 	}
+	for (sd = last; sd; sd = sd->child)
+		(void)cmpxchg(&sd->groups->balance_cpu, cpu, -1);
+
 	rcu_read_unlock();
 
 	/*