path: root/kernel/sched.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2009-04-05 13:33:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-05 13:33:07 -0400
commit		90975ef71246c5c688ead04e8ff6f36dc92d28b3 (patch)
tree		eda44b2efe91509719b0e62219c2efec13a9e762 /kernel/sched.c
parent		cab4e4c43f92582a2bfc026137b3d8a175bd0360 (diff)
parent		558f6ab9106e6be701acb0257e7171df1bbccf04 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-cpumask
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-cpumask: (36 commits)
  cpumask: remove cpumask allocation from idle_balance, fix
  numa, cpumask: move numa_node_id default implementation to topology.h, fix
  cpumask: remove cpumask allocation from idle_balance
  x86: cpumask: x86 mmio-mod.c use cpumask_var_t for downed_cpus
  x86: cpumask: update 32-bit APM not to mug current->cpus_allowed
  x86: microcode: cleanup
  x86: cpumask: use work_on_cpu in arch/x86/kernel/microcode_core.c
  cpumask: fix CONFIG_CPUMASK_OFFSTACK=y cpu hotunplug crash
  numa, cpumask: move numa_node_id default implementation to topology.h
  cpumask: convert node_to_cpumask_map[] to cpumask_var_t
  cpumask: remove x86 cpumask_t uses.
  cpumask: use cpumask_var_t in uv_flush_tlb_others.
  cpumask: remove cpumask_t assignment from vector_allocation_domain()
  cpumask: make Xen use the new operators.
  cpumask: clean up summit's send_IPI functions
  cpumask: use new cpumask functions throughout x86
  x86: unify cpu_callin_mask/cpu_callout_mask/cpu_initialized_mask/cpu_sibling_setup_mask
  cpumask: convert struct cpuinfo_x86's llc_shared_map to cpumask_var_t
  cpumask: convert node_to_cpumask_map[] to cpumask_var_t
  x86: unify 32 and 64-bit node_to_cpumask_map
  ...
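
For orientation, the core conversion in this series is from on-stack cpumask_t temporaries to cpumask_var_t, which with CONFIG_CPUMASK_OFFSTACK=y is only a pointer and must be allocated and freed explicitly. A minimal sketch of the before/after pattern; the helper names are illustrative and not taken from any of the commits above:

	#include <linux/cpumask.h>
	#include <linux/slab.h>
	#include <linux/errno.h>

	/* Old style: a full cpumask_t on the stack (512 bytes with NR_CPUS=4096). */
	static int pick_first_allowed_old(const struct cpumask *allowed)
	{
		cpumask_t tmp;

		cpumask_and(&tmp, allowed, cpu_online_mask);
		return cpumask_first(&tmp);
	}

	/* New style: cpumask_var_t keeps the mask off the stack, at the cost of
	 * an allocation that can fail. */
	static int pick_first_allowed_new(const struct cpumask *allowed)
	{
		cpumask_var_t tmp;
		int cpu;

		if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
			return -ENOMEM;

		cpumask_and(tmp, allowed, cpu_online_mask);
		cpu = cpumask_first(tmp);

		free_cpumask_var(tmp);
		return cpu;
	}

Hot paths that cannot afford the allocation (or its failure) get static storage instead; that is what the kernel/sched.c part of this merge, shown below, does.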
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	43
1 file changed, 22 insertions(+), 21 deletions(-)
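
What the diff below does, in short: idle_balance() and rebalance_domains() used to allocate a temporary cpumask with GFP_ATOMIC on every call and bail out if the allocation failed; the temporary now lives in a per-CPU variable, load_balance_tmpmask, picked up inside load_balance() and load_balance_newidle(). A minimal sketch of that per-CPU pattern, with a hypothetical user (my_tmpmask and my_balance() are made-up names; __get_cpu_var() is the 2.6.29-era per-CPU accessor the patch itself uses):

	#include <linux/cpumask.h>
	#include <linux/percpu.h>

	/* One scratch mask per CPU: no run-time allocation, no failure path. */
	static DEFINE_PER_CPU(cpumask_var_t, my_tmpmask);

	static void my_balance(const struct cpumask *allowed)
	{
		/* Callers run with preemption disabled, so this CPU's copy is stable. */
		struct cpumask *cpus = __get_cpu_var(my_tmpmask);

		cpumask_setall(cpus);
		cpumask_and(cpus, cpus, allowed);
		/* ... scan 'cpus', clearing CPUs that turn out to be unusable ... */
	}

The last two hunks provide the storage: with CONFIG_CPUMASK_OFFSTACK=y, sched_init() grows its bootmem allocation by num_possible_cpus() * cpumask_size() and points each CPU's load_balance_tmpmask at its slice.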
diff --git a/kernel/sched.c b/kernel/sched.c
index 2325db2be31b..55a10b8e31bb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3818,19 +3818,23 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
  */
 #define MAX_PINNED_INTERVAL	512
 
+/* Working cpumask for load_balance and load_balance_newidle. */
+static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *balance, struct cpumask *cpus)
+			int *balance)
 {
	int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
	struct sched_group *group;
	unsigned long imbalance;
	struct rq *busiest;
	unsigned long flags;
+	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
	cpumask_setall(cpus);
 
@@ -3985,8 +3989,7 @@ out:
  * this_rq is locked.
  */
 static int
-load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
-			struct cpumask *cpus)
+load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 {
	struct sched_group *group;
	struct rq *busiest = NULL;
@@ -3994,6 +3997,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
	int ld_moved = 0;
	int sd_idle = 0;
	int all_pinned = 0;
+	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
	cpumask_setall(cpus);
 
@@ -4134,10 +4138,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
	struct sched_domain *sd;
	int pulled_task = 0;
	unsigned long next_balance = jiffies + HZ;
-	cpumask_var_t tmpmask;
-
-	if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
-		return;
 
	for_each_domain(this_cpu, sd) {
		unsigned long interval;
@@ -4148,7 +4148,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
		if (sd->flags & SD_BALANCE_NEWIDLE)
			/* If we've pulled tasks over stop searching: */
			pulled_task = load_balance_newidle(this_cpu, this_rq,
-							   sd, tmpmask);
+							   sd);
 
		interval = msecs_to_jiffies(sd->balance_interval);
		if (time_after(next_balance, sd->last_balance + interval))
@@ -4163,7 +4163,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
		 */
		this_rq->next_balance = next_balance;
	}
-	free_cpumask_var(tmpmask);
 }
 
 /*
@@ -4313,11 +4312,6 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
	unsigned long next_balance = jiffies + 60*HZ;
	int update_next_balance = 0;
	int need_serialize;
-	cpumask_var_t tmp;
-
-	/* Fails alloc? Rebalancing probably not a priority right now. */
-	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
-		return;
 
	for_each_domain(cpu, sd) {
		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -4342,7 +4336,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
		}
 
		if (time_after_eq(jiffies, sd->last_balance + interval)) {
-			if (load_balance(cpu, rq, sd, idle, &balance, tmp)) {
+			if (load_balance(cpu, rq, sd, idle, &balance)) {
				/*
				 * We've pulled tasks over so either we're no
				 * longer idle, or one of our SMT siblings is
@@ -4376,8 +4370,6 @@ out:
	 */
	if (likely(update_next_balance))
		rq->next_balance = next_balance;
-
-	free_cpumask_var(tmp);
 }
 
 /*
@@ -7728,7 +7720,7 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 {
	int group;
 
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
	group = cpumask_first(mask);
	if (sg)
		*sg = &per_cpu(sched_group_core, group).sg;
@@ -7757,7 +7749,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
	group = cpumask_first(mask);
 #else
	group = cpu;
@@ -8100,7 +8092,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
		SD_INIT(sd, SIBLING);
		set_domain_attribute(sd, attr);
		cpumask_and(sched_domain_span(sd),
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
		sd->parent = p;
		p->child = sd;
		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -8111,7 +8103,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
	/* Set up CPU (sibling) groups */
	for_each_cpu(i, cpu_map) {
		cpumask_and(this_sibling_map,
-			     &per_cpu(cpu_sibling_map, i), cpu_map);
+			     topology_thread_cpumask(i), cpu_map);
		if (i != cpumask_first(this_sibling_map))
			continue;
 
@@ -8787,6 +8779,9 @@ void __init sched_init(void)
 #ifdef CONFIG_USER_SCHED
	alloc_size *= 2;
 #endif
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	alloc_size += num_possible_cpus() * cpumask_size();
+#endif
	/*
	 * As sched_init() is called before page_alloc is setup,
	 * we use alloc_bootmem().
@@ -8824,6 +8819,12 @@ void __init sched_init(void)
		ptr += nr_cpu_ids * sizeof(void **);
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_RT_GROUP_SCHED */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+		for_each_possible_cpu(i) {
+			per_cpu(load_balance_tmpmask, i) = (void *)ptr;
+			ptr += cpumask_size();
+		}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
	}
 
 #ifdef CONFIG_SMP