author     Srivatsa Vaddagiri <vatsa@in.ibm.com>      2006-06-27 05:54:41 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>      2006-06-27 20:32:45 -0400
commit     369381694ddcf03f1de403501c8b97099b5109ec
tree       3945620740cc1687c8487356bf4be90394695702 /kernel/sched.c
parent     15f0b676a482fb4067cfe25de417c417dda3440a
[PATCH] sched_domain: Allocate sched_group structures dynamically
As explained here:
http://marc.theaimsgroup.com/?l=linux-kernel&m=114327539012323&w=2
there is a problem with sharing sched_group structures between two
separate sets of sched_domains.
The patch has been tested and found to avoid the kernel lockup problem
described at the above URL.
Signed-off-by: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
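
For context, the ownership pattern the patch moves to can be illustrated outside the kernel. The sketch below is a minimal user-space analogue, not the kernel code itself: names such as group_bycpu, build_groups and free_groups are hypothetical stand-ins for sched_group_phys_bycpu, build_sched_domains() and free_sched_groups(), and malloc/free stand in for kmalloc/kfree. It shows the same scheme the diff introduces: allocate one group array per build, record it in a per-CPU pointer table, and tear down by walking that table instead of reusing a statically shared array.

/*
 * Minimal user-space sketch of the "allocate per build, free by table"
 * pattern (illustrative only; NR_CPUS, struct sched_group and the helper
 * names are stand-ins, not the kernel definitions).
 */
#include <stdlib.h>

#define NR_CPUS 4

struct sched_group { int cpu; };

/* One pointer per CPU, so teardown can free exactly the array that a
 * given build allocated instead of touching a shared static array. */
static struct sched_group *group_bycpu[NR_CPUS];

static void free_groups(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		free(group_bycpu[cpu]);	/* free(NULL) is a no-op */
		group_bycpu[cpu] = NULL;
	}
}

static int build_groups(void)
{
	struct sched_group *groups = NULL;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (!groups) {
			/* allocate once per build, record the owner slot */
			groups = malloc(sizeof(*groups) * NR_CPUS);
			if (!groups)
				goto error;
			group_bycpu[i] = groups;
		}
		groups[i].cpu = i;
	}
	return 0;

error:
	free_groups();	/* mirrors the free_sched_groups() error path */
	return -1;
}

int main(void)
{
	int ret = build_groups();

	free_groups();
	return ret ? 1 : 0;
}
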
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  48
1 file changed, 43 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ee4211bd40c5..122b75584a13 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5747,7 +5747,7 @@ static int cpu_to_cpu_group(int cpu)
 
 #ifdef CONFIG_SCHED_MC
 static DEFINE_PER_CPU(struct sched_domain, core_domains);
-static struct sched_group sched_group_core[NR_CPUS];
+static struct sched_group *sched_group_core_bycpu[NR_CPUS];
 #endif
 
 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
@@ -5763,7 +5763,7 @@ static int cpu_to_core_group(int cpu)
 #endif
 
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
-static struct sched_group sched_group_phys[NR_CPUS];
+static struct sched_group *sched_group_phys_bycpu[NR_CPUS];
 static int cpu_to_phys_group(int cpu)
 {
 #if defined(CONFIG_SCHED_MC)
@@ -5823,9 +5823,9 @@ next_sg:
 /* Free memory allocated for various sched_group structures */
 static void free_sched_groups(const cpumask_t *cpu_map)
 {
+	int cpu;
 #ifdef CONFIG_NUMA
 	int i;
-	int cpu;
 
 	for_each_cpu_mask(cpu, *cpu_map) {
 		struct sched_group *sched_group_allnodes
@@ -5863,6 +5863,18 @@ next_sg:
 		sched_group_nodes_bycpu[cpu] = NULL;
 	}
 #endif
+	for_each_cpu_mask(cpu, *cpu_map) {
+		if (sched_group_phys_bycpu[cpu]) {
+			kfree(sched_group_phys_bycpu[cpu]);
+			sched_group_phys_bycpu[cpu] = NULL;
+		}
+#ifdef CONFIG_SCHED_MC
+		if (sched_group_core_bycpu[cpu]) {
+			kfree(sched_group_core_bycpu[cpu]);
+			sched_group_core_bycpu[cpu] = NULL;
+		}
+#endif
+	}
 }
 
 /*
@@ -5872,6 +5884,10 @@ next_sg:
 static int build_sched_domains(const cpumask_t *cpu_map)
 {
 	int i;
+	struct sched_group *sched_group_phys = NULL;
+#ifdef CONFIG_SCHED_MC
+	struct sched_group *sched_group_core = NULL;
+#endif
 #ifdef CONFIG_NUMA
 	struct sched_group **sched_group_nodes = NULL;
 	struct sched_group *sched_group_allnodes = NULL;
@@ -5930,6 +5946,18 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		cpus_and(sd->span, sd->span, *cpu_map);
 #endif
 
+		if (!sched_group_phys) {
+			sched_group_phys
+				= kmalloc(sizeof(struct sched_group) * NR_CPUS,
+					  GFP_KERNEL);
+			if (!sched_group_phys) {
+				printk (KERN_WARNING "Can not alloc phys sched"
+						     "group\n");
+				goto error;
+			}
+			sched_group_phys_bycpu[i] = sched_group_phys;
+		}
+
 		p = sd;
 		sd = &per_cpu(phys_domains, i);
 		group = cpu_to_phys_group(i);
@@ -5939,6 +5967,18 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		sd->groups = &sched_group_phys[group];
 
 #ifdef CONFIG_SCHED_MC
+		if (!sched_group_core) {
+			sched_group_core
+				= kmalloc(sizeof(struct sched_group) * NR_CPUS,
+					  GFP_KERNEL);
+			if (!sched_group_core) {
+				printk (KERN_WARNING "Can not alloc core sched"
+						     "group\n");
+				goto error;
+			}
+			sched_group_core_bycpu[i] = sched_group_core;
+		}
+
 		p = sd;
 		sd = &per_cpu(core_domains, i);
 		group = cpu_to_core_group(i);
@@ -6134,11 +6174,9 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 
 	return 0;
 
-#ifdef CONFIG_NUMA
 error:
 	free_sched_groups(cpu_map);
 	return -ENOMEM;
-#endif
 }
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.