author     Srivatsa Vaddagiri <vatsa@in.ibm.com>    2006-06-27 05:54:38 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-06-27 20:32:45 -0400
commit     51888ca25a03125e742ef84d4ddfd74e139707a0 (patch)
tree       b15e50f3b67f6e2b94b783fce603d4a1f54a8189 /kernel/sched.c
parent     615052dc3bf96278a843a64d3d1eea03532028c3 (diff)
[PATCH] sched_domain: handle kmalloc failure
Handle memory allocation failures in build_sched_domains() by bailing out
and cleaning up whatever memory has been allocated so far. A direct
consequence of this patch is that load balancing is disabled completely
(even at the sibling level) upon *any* memory allocation failure.
[Lee.Schermerhorn@hp.com: bugfix]
Signed-off-by: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
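The pattern the patch introduces is conventional kernel error handling: allocate in stages, jump to a single error label on the first failure, free everything allocated so far, and return -ENOMEM to the caller. A minimal user-space sketch of that shape follows; the names and types are illustrative stand-ins, not the kernel's sched_group/sched_domain machinery.

/*
 * Minimal sketch of the "bail out and clean up" pattern build_sched_domains()
 * adopts below: any allocation failure jumps to one error label, which frees
 * whatever was allocated so far and reports -ENOMEM.  Names and types here
 * are illustrative, not the kernel's.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct group {
	struct group *next;	/* circular list, like sched_group->next */
};

static int build_groups(struct group **per_node, int nr_nodes)
{
	int i;

	for (i = 0; i < nr_nodes; i++) {
		struct group *g = calloc(1, sizeof(*g));

		if (!g) {
			fprintf(stderr, "can not alloc group for node %d\n", i);
			goto error;
		}
		g->next = g;	/* keep the list well-formed at every step */
		per_node[i] = g;
	}
	return 0;

error:
	/* undo the partial work: free groups 0..i-1 and clear the pointers */
	while (i-- > 0) {
		free(per_node[i]);
		per_node[i] = NULL;
	}
	return -ENOMEM;
}

int main(void)
{
	struct group *nodes[4] = { NULL };
	int i;

	if (build_groups(nodes, 4) == 0)
		printf("built all groups\n");
	else
		printf("allocation failed, partial state cleaned up\n");

	for (i = 0; i < 4; i++)
		free(nodes[i]);	/* free(NULL) is a no-op on the failure path */
	return 0;
}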
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c   139
1 file changed, 78 insertions, 61 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 0ec84f57695d..77a2ec55ef7d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5820,11 +5820,56 @@ next_sg:
 }
 #endif
 
+/* Free memory allocated for various sched_group structures */
+static void free_sched_groups(const cpumask_t *cpu_map)
+{
+#ifdef CONFIG_NUMA
+	int i;
+	int cpu;
+
+	for_each_cpu_mask(cpu, *cpu_map) {
+		struct sched_group *sched_group_allnodes
+			= sched_group_allnodes_bycpu[cpu];
+		struct sched_group **sched_group_nodes
+			= sched_group_nodes_bycpu[cpu];
+
+		if (sched_group_allnodes) {
+			kfree(sched_group_allnodes);
+			sched_group_allnodes_bycpu[cpu] = NULL;
+		}
+
+		if (!sched_group_nodes)
+			continue;
+
+		for (i = 0; i < MAX_NUMNODES; i++) {
+			cpumask_t nodemask = node_to_cpumask(i);
+			struct sched_group *oldsg, *sg = sched_group_nodes[i];
+
+			cpus_and(nodemask, nodemask, *cpu_map);
+			if (cpus_empty(nodemask))
+				continue;
+
+			if (sg == NULL)
+				continue;
+			sg = sg->next;
+next_sg:
+			oldsg = sg;
+			sg = sg->next;
+			kfree(oldsg);
+			if (oldsg != sched_group_nodes[i])
+				goto next_sg;
+		}
+		kfree(sched_group_nodes);
+		sched_group_nodes_bycpu[cpu] = NULL;
+	}
+#endif
+}
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus
  */
-void build_sched_domains(const cpumask_t *cpu_map)
+static int build_sched_domains(const cpumask_t *cpu_map)
 {
 	int i;
 #ifdef CONFIG_NUMA
@@ -5834,11 +5879,11 @@ void build_sched_domains(const cpumask_t *cpu_map)
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kmalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
+	sched_group_nodes = kzalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
 					   GFP_ATOMIC);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
-		return;
+		return -ENOMEM;
 	}
 	sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
 #endif
@@ -5864,7 +5909,7 @@ void build_sched_domains(const cpumask_t *cpu_map)
 			if (!sched_group_allnodes) {
 				printk(KERN_WARNING
 				"Can not alloc allnodes sched group\n");
-				break;
+				goto error;
 			}
 			sched_group_allnodes_bycpu[i]
 						= sched_group_allnodes;
@@ -5978,23 +6023,20 @@ void build_sched_domains(const cpumask_t *cpu_map)
 		cpus_and(domainspan, domainspan, *cpu_map);
 
 		sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
+		if (!sg) {
+			printk(KERN_WARNING "Can not alloc domain group for "
+					"node %d\n", i);
+			goto error;
+		}
 		sched_group_nodes[i] = sg;
 		for_each_cpu_mask(j, nodemask) {
 			struct sched_domain *sd;
 			sd = &per_cpu(node_domains, j);
 			sd->groups = sg;
-			if (sd->groups == NULL) {
-				/* Turn off balancing if we have no groups */
-				sd->flags = 0;
-			}
-		}
-		if (!sg) {
-			printk(KERN_WARNING
-			"Can not alloc domain group for node %d\n", i);
-			continue;
 		}
 		sg->cpu_power = 0;
 		sg->cpumask = nodemask;
+		sg->next = sg;
 		cpus_or(covered, covered, nodemask);
 		prev = sg;
 
@@ -6017,15 +6059,15 @@ void build_sched_domains(const cpumask_t *cpu_map)
 			if (!sg) {
 				printk(KERN_WARNING
 				"Can not alloc domain group for node %d\n", j);
-				break;
+				goto error;
 			}
 			sg->cpu_power = 0;
 			sg->cpumask = tmp;
+			sg->next = prev->next;
 			cpus_or(covered, covered, tmp);
 			prev->next = sg;
 			prev = sg;
 		}
-		prev->next = sched_group_nodes[i];
 	}
 #endif
 
@@ -6088,13 +6130,22 @@ void build_sched_domains(const cpumask_t *cpu_map)
 	 * Tune cache-hot values:
 	 */
 	calibrate_migration_costs(cpu_map);
+
+	return 0;
+
+#ifdef CONFIG_NUMA
+error:
+	free_sched_groups(cpu_map);
+	return -ENOMEM;
+#endif
 }
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  */
-static void arch_init_sched_domains(const cpumask_t *cpu_map)
+static int arch_init_sched_domains(const cpumask_t *cpu_map)
 {
 	cpumask_t cpu_default_map;
+	int err;
 
 	/*
 	 * Setup mask for cpus without special case scheduling requirements.
@@ -6103,51 +6154,14 @@ static void arch_init_sched_domains(const cpumask_t *cpu_map)
 	 */
 	cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
 
-	build_sched_domains(&cpu_default_map);
+	err = build_sched_domains(&cpu_default_map);
+
+	return err;
 }
 
 static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
 {
-#ifdef CONFIG_NUMA
-	int i;
-	int cpu;
-
-	for_each_cpu_mask(cpu, *cpu_map) {
-		struct sched_group *sched_group_allnodes
-			= sched_group_allnodes_bycpu[cpu];
-		struct sched_group **sched_group_nodes
-			= sched_group_nodes_bycpu[cpu];
-
-		if (sched_group_allnodes) {
-			kfree(sched_group_allnodes);
-			sched_group_allnodes_bycpu[cpu] = NULL;
-		}
-
-		if (!sched_group_nodes)
-			continue;
-
-		for (i = 0; i < MAX_NUMNODES; i++) {
-			cpumask_t nodemask = node_to_cpumask(i);
-			struct sched_group *oldsg, *sg = sched_group_nodes[i];
-
-			cpus_and(nodemask, nodemask, *cpu_map);
-			if (cpus_empty(nodemask))
-				continue;
-
-			if (sg == NULL)
-				continue;
-			sg = sg->next;
-next_sg:
-			oldsg = sg;
-			sg = sg->next;
-			kfree(oldsg);
-			if (oldsg != sched_group_nodes[i])
-				goto next_sg;
-		}
-		kfree(sched_group_nodes);
-		sched_group_nodes_bycpu[cpu] = NULL;
-	}
-#endif
+	free_sched_groups(cpu_map);
 }
 
 /*
@@ -6172,9 +6186,10 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
  * correct sched domains
  * Call with hotplug lock held
  */
-void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
+int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
 {
 	cpumask_t change_map;
+	int err = 0;
 
 	cpus_and(*partition1, *partition1, cpu_online_map);
 	cpus_and(*partition2, *partition2, cpu_online_map);
@@ -6183,9 +6198,11 @@ void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
 	/* Detach sched domains from all of the affected cpus */
 	detach_destroy_domains(&change_map);
 	if (!cpus_empty(*partition1))
-		build_sched_domains(partition1);
-	if (!cpus_empty(*partition2))
-		build_sched_domains(partition2);
+		err = build_sched_domains(partition1);
+	if (!err && !cpus_empty(*partition2))
+		err = build_sched_domains(partition2);
+
+	return err;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
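The last hunks make the failure visible to callers: partition_sched_domains() now attempts the second partition only if the first one built successfully, and hands the error back up instead of returning void. A small self-contained sketch of that control flow is below; the function names are hypothetical, only the "build the second stage if the first succeeded, then return the error" chaining mirrors the patch.

/*
 * Sketch of the error-propagation shape partition_sched_domains() now has:
 * build the first partition, attempt the second only if the first succeeded,
 * and return the failure to the caller.  All names here are hypothetical.
 */
#include <errno.h>
#include <stdio.h>

static int build_partition(const char *name, int simulate_failure)
{
	if (simulate_failure) {
		fprintf(stderr, "can not build %s\n", name);
		return -ENOMEM;
	}
	printf("built %s\n", name);
	return 0;
}

static int repartition(int fail_first)
{
	int err = 0;

	err = build_partition("partition1", fail_first);
	if (!err)
		err = build_partition("partition2", 0);

	return err;
}

int main(void)
{
	printf("repartition() = %d\n", repartition(0));	/* both stages built */
	printf("repartition() = %d\n", repartition(1));	/* stops after first failure */
	return 0;
}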