Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 43 ++++++++++++++++++++++++---------------------
 1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5757e03cfac0..0ad7c28b7236 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3818,19 +3818,23 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
  */
 #define MAX_PINNED_INTERVAL	512
 
+/* Working cpumask for load_balance and load_balance_newidle. */
+static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *balance, struct cpumask *cpus)
+			int *balance)
 {
	int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
	struct sched_group *group;
	unsigned long imbalance;
	struct rq *busiest;
	unsigned long flags;
+	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
	cpumask_setall(cpus);
 
@@ -3985,8 +3989,7 @@ out:
  * this_rq is locked.
  */
 static int
-load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
-			struct cpumask *cpus)
+load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 {
	struct sched_group *group;
	struct rq *busiest = NULL;
@@ -3994,6 +3997,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
	int ld_moved = 0;
	int sd_idle = 0;
	int all_pinned = 0;
+	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
	cpumask_setall(cpus);
 
@@ -4134,10 +4138,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
	struct sched_domain *sd;
	int pulled_task = 0;
	unsigned long next_balance = jiffies + HZ;
-	cpumask_var_t tmpmask;
-
-	if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
-		return;
 
	for_each_domain(this_cpu, sd) {
		unsigned long interval;
@@ -4148,7 +4148,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
		if (sd->flags & SD_BALANCE_NEWIDLE)
			/* If we've pulled tasks over stop searching: */
			pulled_task = load_balance_newidle(this_cpu, this_rq,
-							   sd, tmpmask);
+							   sd);
 
		interval = msecs_to_jiffies(sd->balance_interval);
		if (time_after(next_balance, sd->last_balance + interval))
@@ -4163,7 +4163,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
		 */
		this_rq->next_balance = next_balance;
	}
-	free_cpumask_var(tmpmask);
 }
 
 /*
@@ -4313,11 +4312,6 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
	unsigned long next_balance = jiffies + 60*HZ;
	int update_next_balance = 0;
	int need_serialize;
-	cpumask_var_t tmp;
-
-	/* Fails alloc? Rebalancing probably not a priority right now. */
-	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
-		return;
 
	for_each_domain(cpu, sd) {
		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -4342,7 +4336,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
		}
 
		if (time_after_eq(jiffies, sd->last_balance + interval)) {
-			if (load_balance(cpu, rq, sd, idle, &balance, tmp)) {
+			if (load_balance(cpu, rq, sd, idle, &balance)) {
				/*
				 * We've pulled tasks over so either we're no
				 * longer idle, or one of our SMT siblings is
@@ -4376,8 +4370,6 @@ out:
	 */
	if (likely(update_next_balance))
		rq->next_balance = next_balance;
-
-	free_cpumask_var(tmp);
 }
 
 /*
@@ -7648,7 +7640,7 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 {
	int group;
 
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
	group = cpumask_first(mask);
	if (sg)
		*sg = &per_cpu(sched_group_core, group).sg;
@@ -7677,7 +7669,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
	group = cpumask_first(mask);
 #else
	group = cpu;
@@ -8020,7 +8012,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
		SD_INIT(sd, SIBLING);
		set_domain_attribute(sd, attr);
		cpumask_and(sched_domain_span(sd),
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
		sd->parent = p;
		p->child = sd;
		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -8031,7 +8023,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
	/* Set up CPU (sibling) groups */
	for_each_cpu(i, cpu_map) {
		cpumask_and(this_sibling_map,
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
		if (i != cpumask_first(this_sibling_map))
			continue;
 
@@ -8707,6 +8699,9 @@ void __init sched_init(void)
 #ifdef CONFIG_USER_SCHED
	alloc_size *= 2;
 #endif
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	alloc_size += num_possible_cpus() * cpumask_size();
+#endif
	/*
	 * As sched_init() is called before page_alloc is setup,
	 * we use alloc_bootmem().
@@ -8744,6 +8739,12 @@ void __init sched_init(void)
		ptr += nr_cpu_ids * sizeof(void **);
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_RT_GROUP_SCHED */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+		for_each_possible_cpu(i) {
+			per_cpu(load_balance_tmpmask, i) = (void *)ptr;
+			ptr += cpumask_size();
+		}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
	}
 
 #ifdef CONFIG_SMP
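
For readers unfamiliar with the idiom this patch adopts, below is a minimal sketch of a per-CPU working cpumask, using hypothetical names (scratch_mask, touch_scratch_mask) rather than the patch's own load_balance_tmpmask. It illustrates how __get_cpu_var() hands back the current CPU's preallocated mask, replacing the alloc_cpumask_var(GFP_ATOMIC)/free_cpumask_var() pairs removed above.

/*
 * Sketch only: scratch_mask and touch_scratch_mask are hypothetical.
 * The patch itself uses load_balance_tmpmask in kernel/sched.c and, when
 * CONFIG_CPUMASK_OFFSTACK is set, carves its storage out of the bootmem
 * block allocated in sched_init().
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>

/* One scratch mask per CPU, set up once and reused on every call. */
static DEFINE_PER_CPU(cpumask_var_t, scratch_mask);

static void touch_scratch_mask(void)
{
	/*
	 * Grab this CPU's mask. This is only safe while the caller cannot
	 * migrate to another CPU (runqueue lock held or preemption
	 * disabled), which holds on the load-balancing paths above.
	 */
	struct cpumask *cpus = __get_cpu_var(scratch_mask);

	cpumask_setall(cpus);
	/* ... use *cpus as throwaway working state ... */
}

The trade-off is one statically sized cpumask per possible CPU in exchange for dropping an atomic allocation, and its failure path, from the idle-balance and rebalance hot paths.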