author    Rusty Russell <rusty@rustcorp.com.au>    2009-03-30 23:03:50 -0400
committer Rusty Russell <rusty@rustcorp.com.au>    2009-03-30 23:03:50 -0400
commit    558f6ab9106e6be701acb0257e7171df1bbccf04 (patch)
tree      6e811633baeb676693c493f6c82bf785cab2771d /kernel/sched.c
parent    15f7176eb1cccec0a332541285ee752b935c1c85 (diff)
parent    65fb0d23fcddd8697c871047b700c78817bdaa43 (diff)
Merge branch 'cpumask-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
Conflicts:
	arch/x86/include/asm/topology.h
	drivers/oprofile/buffer_sync.c
(Both cases: changed in Linus' tree, removed in Ingo's).
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--    kernel/sched.c    43
1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 196d48babbef..eb91695976e7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3818,19 +3818,23 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
  */
 #define MAX_PINNED_INTERVAL	512
 
+/* Working cpumask for load_balance and load_balance_newidle. */
+static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
 			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *balance, struct cpumask *cpus)
+			int *balance)
 {
 	int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
 	struct sched_group *group;
 	unsigned long imbalance;
 	struct rq *busiest;
 	unsigned long flags;
+	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
 	cpumask_setall(cpus);
 
@@ -3985,8 +3989,7 @@ out:
  * this_rq is locked.
  */
 static int
-load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
-			struct cpumask *cpus)
+load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 {
 	struct sched_group *group;
 	struct rq *busiest = NULL;
@@ -3994,6 +3997,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
 	int ld_moved = 0;
 	int sd_idle = 0;
 	int all_pinned = 0;
+	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
 	cpumask_setall(cpus);
 
@@ -4134,10 +4138,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 	struct sched_domain *sd;
 	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
-	cpumask_var_t tmpmask;
-
-	if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
-		return;
 
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
@@ -4148,7 +4148,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		if (sd->flags & SD_BALANCE_NEWIDLE)
 			/* If we've pulled tasks over stop searching: */
 			pulled_task = load_balance_newidle(this_cpu, this_rq,
-							   sd, tmpmask);
+							   sd);
 
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
@@ -4163,7 +4163,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 	 */
 		this_rq->next_balance = next_balance;
 	}
-	free_cpumask_var(tmpmask);
 }
 
 /*
@@ -4313,11 +4312,6 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
 	int need_serialize;
-	cpumask_var_t tmp;
-
-	/* Fails alloc? Rebalancing probably not a priority right now. */
-	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
-		return;
 
 	for_each_domain(cpu, sd) {
 		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -4342,7 +4336,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 		}
 
 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
-			if (load_balance(cpu, rq, sd, idle, &balance, tmp)) {
+			if (load_balance(cpu, rq, sd, idle, &balance)) {
 				/*
 				 * We've pulled tasks over so either we're no
 				 * longer idle, or one of our SMT siblings is
@@ -4376,8 +4370,6 @@ out:
 	 */
 	if (likely(update_next_balance))
 		rq->next_balance = next_balance;
-
-	free_cpumask_var(tmp);
 }
 
 /*
@@ -7713,7 +7705,7 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 {
 	int group;
 
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
 	if (sg)
 		*sg = &per_cpu(sched_group_core, group).sg;
@@ -7742,7 +7734,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
 	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #else
 	group = cpu;
@@ -8085,7 +8077,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		SD_INIT(sd, SIBLING);
 		set_domain_attribute(sd, attr);
 		cpumask_and(sched_domain_span(sd),
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
 		sd->parent = p;
 		p->child = sd;
 		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -8096,7 +8088,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	/* Set up CPU (sibling) groups */
 	for_each_cpu(i, cpu_map) {
 		cpumask_and(this_sibling_map,
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
 		if (i != cpumask_first(this_sibling_map))
 			continue;
 
@@ -8772,6 +8764,9 @@ void __init sched_init(void)
 #ifdef CONFIG_USER_SCHED
 	alloc_size *= 2;
 #endif
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	alloc_size += num_possible_cpus() * cpumask_size();
+#endif
 	/*
 	 * As sched_init() is called before page_alloc is setup,
 	 * we use alloc_bootmem().
@@ -8809,6 +8804,12 @@ void __init sched_init(void)
 		ptr += nr_cpu_ids * sizeof(void **);
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_RT_GROUP_SCHED */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	for_each_possible_cpu(i) {
+		per_cpu(load_balance_tmpmask, i) = (void *)ptr;
+		ptr += cpumask_size();
+	}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 	}
 
 #ifdef CONFIG_SMP
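
Note (not part of the commit): the recurring change in this diff is to drop the per-call alloc_cpumask_var()/free_cpumask_var() pair in the balancing paths and instead give each CPU a preallocated working mask. The sketch below illustrates that pattern in isolation; example_tmpmask and the example_balance_*() functions are hypothetical names, and the bodies are simplified stand-ins, not the scheduler's actual balancing code.

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/gfp.h>

/* One scratch cpumask per CPU (hypothetical name). Under
 * CONFIG_CPUMASK_OFFSTACK the storage behind each cpumask_var_t must be
 * set up once at init time, as the sched_init() hunk above does for
 * load_balance_tmpmask. */
static DEFINE_PER_CPU(cpumask_var_t, example_tmpmask);

/* Old shape: allocate a temporary mask on every call and cope with
 * allocation failure in a hot path. */
static int example_balance_old(const struct cpumask *allowed)
{
	cpumask_var_t tmp;

	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
		return 0;	/* give up on balancing this time */

	cpumask_and(tmp, allowed, cpu_online_mask);
	/* ... balancing work restricted to 'tmp' ... */

	free_cpumask_var(tmp);
	return 1;
}

/* New shape: reuse this CPU's preallocated mask. No allocation and no
 * failure path; assumes the caller runs with preemption disabled, as
 * the load-balancing paths do. */
static int example_balance_new(const struct cpumask *allowed)
{
	struct cpumask *tmp = __get_cpu_var(example_tmpmask);

	cpumask_and(tmp, allowed, cpu_online_mask);
	/* ... balancing work restricted to 'tmp' ... */

	return 1;
}

The trade-off is the one visible in the sched_init() hunks: num_possible_cpus() * cpumask_size() bytes reserved up front (only when CONFIG_CPUMASK_OFFSTACK is set) in exchange for removing the GFP_ATOMIC allocations and their failure handling from idle_balance() and rebalance_domains().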