Diffstat (limited to 'kernel/sched.c')

 kernel/sched.c | 89 +++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 69 insertions(+), 20 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 50860ad5b624..9508527845df 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4970,10 +4970,10 @@ static int cpu_to_phys_group(int cpu)
  * gets dynamically allocated.
  */
 static DEFINE_PER_CPU(struct sched_domain, node_domains);
-static struct sched_group *sched_group_nodes[MAX_NUMNODES];
+static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
 
 static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
-static struct sched_group sched_group_allnodes[MAX_NUMNODES];
+static struct sched_group *sched_group_allnodes_bycpu[NR_CPUS];
 
 static int cpu_to_allnodes_group(int cpu)
 {
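This first hunk drops the single static table of per-node group lists (and the static allnodes group array) in favour of per-CPU pointer tables. Each invocation of build_sched_domains() will allocate its own tables and register them under one CPU of the map it was handed, so that arch_destroy_sched_domains() can later locate and release exactly the memory belonging to that partition. A minimal userspace sketch of that register-then-release bookkeeping, using simplified stand-in types and hypothetical helpers (build_partition, destroy_partition):

#include <stdlib.h>

#define NR_CPUS		8
#define MAX_NUMNODES	4

struct sched_group { int id; };			/* stand-in type */

static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];

static int build_partition(int first_cpu)	/* hypothetical */
{
	/* one list-head pointer per node, owned by this partition */
	struct sched_group **nodes = calloc(MAX_NUMNODES, sizeof(*nodes));

	if (!nodes)
		return -1;
	sched_group_nodes_bycpu[first_cpu] = nodes;	/* register */
	return 0;
}

static void destroy_partition(int first_cpu)	/* hypothetical */
{
	free(sched_group_nodes_bycpu[first_cpu]);	/* locate and free */
	sched_group_nodes_bycpu[first_cpu] = NULL;
}

int main(void)
{
	if (build_partition(0) == 0)
		destroy_partition(0);
	return 0;
}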
@@ -4988,6 +4988,21 @@ static int cpu_to_allnodes_group(int cpu)
 void build_sched_domains(const cpumask_t *cpu_map)
 {
 	int i;
+#ifdef CONFIG_NUMA
+	struct sched_group **sched_group_nodes = NULL;
+	struct sched_group *sched_group_allnodes = NULL;
+
+	/*
+	 * Allocate the per-node list of sched groups
+	 */
+	sched_group_nodes = kmalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
+					   GFP_ATOMIC);
+	if (!sched_group_nodes) {
+		printk(KERN_WARNING "Can not alloc sched group node list\n");
+		return;
+	}
+	sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
+#endif
 
 	/*
 	 * Set up domains for cpus specified by the cpu_map.
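build_sched_domains() now allocates the per-node table up front and records it under first_cpu(*cpu_map). Note what the kmalloc() sizes: it reserves MAX_NUMNODES pointers (list heads), not whole groups, whereas the allnodes allocation in the next hunk reserves full struct sched_group objects. A small userspace sketch of that size difference, with a simplified stand-in struct:

#include <stdio.h>
#include <stdlib.h>

#define MAX_NUMNODES	4

struct sched_group { unsigned long cpumask; };	/* stand-in type */

int main(void)
{
	/* per-node table of list heads: MAX_NUMNODES pointers */
	struct sched_group **nodes =
		malloc(sizeof(struct sched_group *) * MAX_NUMNODES);
	/* allnodes groups: MAX_NUMNODES complete structures */
	struct sched_group *allnodes =
		malloc(sizeof(struct sched_group) * MAX_NUMNODES);

	printf("table: %zu bytes, groups: %zu bytes\n",
	       sizeof(struct sched_group *) * MAX_NUMNODES,
	       sizeof(struct sched_group) * MAX_NUMNODES);
	free(nodes);
	free(allnodes);
	return 0;
}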
@@ -5000,8 +5015,21 @@ void build_sched_domains(const cpumask_t *cpu_map)
 		cpus_and(nodemask, nodemask, *cpu_map);
 
 #ifdef CONFIG_NUMA
-		if (num_online_cpus()
+		if (cpus_weight(*cpu_map)
 				> SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
+			if (!sched_group_allnodes) {
+				sched_group_allnodes
+					= kmalloc(sizeof(struct sched_group)
+							* MAX_NUMNODES,
+						  GFP_KERNEL);
+				if (!sched_group_allnodes) {
+					printk(KERN_WARNING
+					"Can not alloc allnodes sched group\n");
+					break;
+				}
+				sched_group_allnodes_bycpu[i]
+					= sched_group_allnodes;
+			}
 			sd = &per_cpu(allnodes_domains, i);
 			*sd = SD_ALLNODES_INIT;
 			sd->span = *cpu_map;
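The allnodes decision now tests cpus_weight(*cpu_map) rather than num_online_cpus(), so it depends only on the map being built, not on CPUs outside it. The group array itself is allocated lazily by the first CPU in the loop that needs it and reused by the rest; on allocation failure the patch simply breaks out and degrades, and the next hunk guards init_sched_build_groups() so an unallocated array is never initialized. A minimal sketch of the allocate-on-first-use pattern, where needs_allnodes() is a hypothetical stand-in for the cpus_weight() comparison:

#include <stdlib.h>

#define MAX_NUMNODES	4

struct sched_group { int id; };			/* stand-in type */

static int needs_allnodes(int cpu) { return 1; }	/* hypothetical */

int main(void)
{
	struct sched_group *allnodes = NULL;

	for (int cpu = 0; cpu < 8; cpu++) {
		if (!needs_allnodes(cpu))
			continue;
		if (!allnodes) {	/* first user allocates */
			allnodes = malloc(sizeof(*allnodes) * MAX_NUMNODES);
			if (!allnodes)
				break;	/* degrade, as the patch does */
		}
		/* ... attach cpu to its allnodes domain ... */
	}
	free(allnodes);		/* free(NULL) is a no-op */
	return 0;
}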
@@ -5065,8 +5093,9 @@ void build_sched_domains(const cpumask_t *cpu_map)
 
 #ifdef CONFIG_NUMA
 	/* Set up node groups */
-	init_sched_build_groups(sched_group_allnodes, *cpu_map,
-				&cpu_to_allnodes_group);
+	if (sched_group_allnodes)
+		init_sched_build_groups(sched_group_allnodes, *cpu_map,
+					&cpu_to_allnodes_group);
 
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		/* Set up node groups */
@@ -5077,8 +5106,10 @@ void build_sched_domains(const cpumask_t *cpu_map)
 		int j;
 
 		cpus_and(nodemask, nodemask, *cpu_map);
-		if (cpus_empty(nodemask))
+		if (cpus_empty(nodemask)) {
+			sched_group_nodes[i] = NULL;
 			continue;
+		}
 
 		domainspan = sched_domain_node_span(i);
 		cpus_and(domainspan, domainspan, *cpu_map);
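The explicit NULL store matters because the per-node table came from kmalloc(), not kzalloc(): slots for nodes with no CPUs in the map would otherwise hold garbage, and the teardown loop would chase wild pointers. A userspace sketch of the same idiom, with node_is_empty() as a hypothetical stand-in for the cpus_empty() test:

#include <stdlib.h>

#define MAX_NUMNODES	4

static int node_is_empty(int node) { return node != 0; }	/* hypothetical */

int main(void)
{
	void **groups = malloc(MAX_NUMNODES * sizeof(*groups));

	if (!groups)
		return 1;
	for (int i = 0; i < MAX_NUMNODES; i++) {
		if (node_is_empty(i)) {
			groups[i] = NULL;	/* mark slot as unused */
			continue;
		}
		groups[i] = malloc(16);		/* build this node's list */
	}
	/* teardown can now trust the slots: NULL marks "nothing built" */
	for (int i = 0; i < MAX_NUMNODES; i++)
		free(groups[i]);		/* free(NULL) is a no-op */
	free(groups);
	return 0;
}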
@@ -5223,24 +5254,42 @@ static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
 {
 #ifdef CONFIG_NUMA
 	int i;
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		cpumask_t nodemask = node_to_cpumask(i);
-		struct sched_group *oldsg, *sg = sched_group_nodes[i];
+	int cpu;
 
-		cpus_and(nodemask, nodemask, *cpu_map);
-		if (cpus_empty(nodemask))
-			continue;
+	for_each_cpu_mask(cpu, *cpu_map) {
+		struct sched_group *sched_group_allnodes
+			= sched_group_allnodes_bycpu[cpu];
+		struct sched_group **sched_group_nodes
+			= sched_group_nodes_bycpu[cpu];
 
-		if (sg == NULL)
+		if (sched_group_allnodes) {
+			kfree(sched_group_allnodes);
+			sched_group_allnodes_bycpu[cpu] = NULL;
+		}
+
+		if (!sched_group_nodes)
 			continue;
-		sg = sg->next;
+
+		for (i = 0; i < MAX_NUMNODES; i++) {
+			cpumask_t nodemask = node_to_cpumask(i);
+			struct sched_group *oldsg, *sg = sched_group_nodes[i];
+
+			cpus_and(nodemask, nodemask, *cpu_map);
+			if (cpus_empty(nodemask))
+				continue;
+
+			if (sg == NULL)
+				continue;
+			sg = sg->next;
 next_sg:
-		oldsg = sg;
-		sg = sg->next;
-		kfree(oldsg);
-		if (oldsg != sched_group_nodes[i])
-			goto next_sg;
-		sched_group_nodes[i] = NULL;
+			oldsg = sg;
+			sg = sg->next;
+			kfree(oldsg);
+			if (oldsg != sched_group_nodes[i])
+				goto next_sg;
+		}
+		kfree(sched_group_nodes);
+		sched_group_nodes_bycpu[cpu] = NULL;
 	}
 #endif
 }
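Teardown is restructured to mirror the new ownership: for each CPU in the departing map it frees that partition's allnodes array, walks every node's circular group list with the existing next_sg loop, and finally frees the pointer table itself, clearing both bycpu slots. Since the groups form a circular singly linked list, the loop starts at head->next and stops after freeing the head itself. A standalone sketch of that ring-freeing pattern, with a simplified stand-in type:

#include <stdlib.h>

struct sched_group {
	struct sched_group *next;	/* circular: last points to first */
};

static void free_group_ring(struct sched_group *head)
{
	struct sched_group *sg = head->next, *oldsg;

	do {
		oldsg = sg;
		sg = sg->next;	/* read successor before freeing */
		free(oldsg);
	} while (oldsg != head);	/* head is freed on the last pass */
}

int main(void)
{
	/* build a two-element ring and free it */
	struct sched_group *a = malloc(sizeof(*a));
	struct sched_group *b = malloc(sizeof(*b));

	a->next = b;
	b->next = a;
	free_group_ring(a);
	return 0;
}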