author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-07 08:10:03 -0400
committer	Ingo Molnar <mingo@elte.hu>			2011-04-11 08:09:31 -0400
commit		54ab4ff4316eb329d2c1acc110fbc623d2966931 (patch)
tree		61632ef9c87adb930e96e2a63d2027a571b58038 /kernel/sched.c
parent		d069b916f7b50021d41d6ce498f86da32a7afaec (diff)
sched: Move sched domain storage into the topology list
In order to remove the last dependency on the static domain levels,
move the sd_data storage into the topology structure.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.924926412@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
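
The net effect on the data structures is summarized below. This is an
editor's condensed sketch rather than code from the patch itself: the
sd_data members shown are inferred from their uses in this diff
(alloc_percpu of sched_domain/sched_group pointers), and unrelated
fields are omitted.

struct sd_data {
	struct sched_domain ** __percpu sd;
	struct sched_group  ** __percpu sg;
};

/* Before: s_data carried one statically-sized storage slot per level. */
struct s_data {
	struct sched_domain ** __percpu sd;
	struct sd_data sdd[SD_LV_MAX];		/* removed by this patch */
	struct root_domain *rd;
};

/* After: each topology level owns its storage directly. */
struct sched_domain_topology_level {
	sched_domain_init_f init;
	sched_domain_mask_f mask;
	struct sd_data data;			/* added by this patch */
};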
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	129
1 file changed, 77 insertions(+), 52 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 38bc53b576a7..3231e1997426 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6837,7 +6837,6 @@ struct sd_data {
 
 struct s_data {
 	struct sched_domain ** __percpu sd;
-	struct sd_data sdd[SD_LV_MAX];
 	struct root_domain *rd;
 };
 
@@ -6848,12 +6847,15 @@ enum s_alloc {
 	sa_none,
 };
 
-typedef struct sched_domain *(*sched_domain_init_f)(struct s_data *d, int cpu);
+struct sched_domain_topology_level;
+
+typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
 struct sched_domain_topology_level {
 	sched_domain_init_f init;
 	sched_domain_mask_f mask;
+	struct sd_data data;
 };
 
 /*
@@ -6958,15 +6960,16 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 # define SD_INIT_NAME(sd, type) do { } while (0)
 #endif
 
 #define SD_INIT_FUNC(type)						\
-static noinline struct sched_domain *sd_init_##type(struct s_data *d, int cpu) \
-{									\
-	struct sched_domain *sd = *per_cpu_ptr(d->sdd[SD_LV_##type].sd, cpu); \
-	*sd = SD_##type##_INIT;						\
-	sd->level = SD_LV_##type;					\
-	SD_INIT_NAME(sd, type);						\
-	sd->private = &d->sdd[SD_LV_##type];				\
-	return sd;							\
+static noinline struct sched_domain *					\
+sd_init_##type(struct sched_domain_topology_level *tl, int cpu)	\
+{									\
+	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);	\
+	*sd = SD_##type##_INIT;						\
+	sd->level = SD_LV_##type;					\
+	SD_INIT_NAME(sd, type);						\
+	sd->private = &tl->data;					\
+	return sd;							\
 }
 
 SD_INIT_FUNC(CPU)
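
As a concrete illustration, SD_INIT_FUNC(CPU) now expands to roughly the
following. This is a hand-expanded sketch (whitespace editorial;
SD_CPU_INIT, SD_LV_CPU and SD_INIT_NAME come from the surrounding kernel
sources):

static noinline struct sched_domain *
sd_init_CPU(struct sched_domain_topology_level *tl, int cpu)
{
	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
	*sd = SD_CPU_INIT;
	sd->level = SD_LV_CPU;
	SD_INIT_NAME(sd, CPU);
	sd->private = &tl->data;	/* was &d->sdd[SD_LV_CPU] */
	return sd;
}

The only behavioral difference is where the backing sd_data lives: in
the topology level rather than in a fixed-size array indexed by level.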
@@ -7019,11 +7022,12 @@ static void set_domain_attribute(struct sched_domain *sd,
 	}
 }
 
+static void __sdt_free(const struct cpumask *cpu_map);
+static int __sdt_alloc(const struct cpumask *cpu_map);
+
 static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 				 const struct cpumask *cpu_map)
 {
-	int i, j;
-
 	switch (what) {
 	case sa_rootdomain:
 		if (!atomic_read(&d->rd->refcount))
@@ -7031,14 +7035,7 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 	case sa_sd:
 		free_percpu(d->sd); /* fall through */
 	case sa_sd_storage:
-		for (i = 0; i < SD_LV_MAX; i++) {
-			for_each_cpu(j, cpu_map) {
-				kfree(*per_cpu_ptr(d->sdd[i].sd, j));
-				kfree(*per_cpu_ptr(d->sdd[i].sg, j));
-			}
-			free_percpu(d->sdd[i].sd);
-			free_percpu(d->sdd[i].sg);
-		} /* fall through */
+		__sdt_free(cpu_map); /* fall through */
 	case sa_none:
 		break;
 	}
@@ -7047,38 +7044,10 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 						   const struct cpumask *cpu_map)
 {
-	int i, j;
-
 	memset(d, 0, sizeof(*d));
 
-	for (i = 0; i < SD_LV_MAX; i++) {
-		d->sdd[i].sd = alloc_percpu(struct sched_domain *);
-		if (!d->sdd[i].sd)
-			return sa_sd_storage;
-
-		d->sdd[i].sg = alloc_percpu(struct sched_group *);
-		if (!d->sdd[i].sg)
-			return sa_sd_storage;
-
-		for_each_cpu(j, cpu_map) {
-			struct sched_domain *sd;
-			struct sched_group *sg;
-
-			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
-					GFP_KERNEL, cpu_to_node(j));
-			if (!sd)
-				return sa_sd_storage;
-
-			*per_cpu_ptr(d->sdd[i].sd, j) = sd;
-
-			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
-					GFP_KERNEL, cpu_to_node(j));
-			if (!sg)
-				return sa_sd_storage;
-
-			*per_cpu_ptr(d->sdd[i].sg, j) = sg;
-		}
-	}
+	if (__sdt_alloc(cpu_map))
+		return sa_sd_storage;
 	d->sd = alloc_percpu(struct sched_domain *);
 	if (!d->sd)
 		return sa_sd_storage;
@@ -7137,12 +7106,68 @@ static struct sched_domain_topology_level default_topology[] = {
 
 static struct sched_domain_topology_level *sched_domain_topology = default_topology;
 
+static int __sdt_alloc(const struct cpumask *cpu_map)
+{
+	struct sched_domain_topology_level *tl;
+	int j;
+
+	for (tl = sched_domain_topology; tl->init; tl++) {
+		struct sd_data *sdd = &tl->data;
+
+		sdd->sd = alloc_percpu(struct sched_domain *);
+		if (!sdd->sd)
+			return -ENOMEM;
+
+		sdd->sg = alloc_percpu(struct sched_group *);
+		if (!sdd->sg)
+			return -ENOMEM;
+
+		for_each_cpu(j, cpu_map) {
+			struct sched_domain *sd;
+			struct sched_group *sg;
+
+			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sd)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sd, j) = sd;
+
+			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sg)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sg, j) = sg;
+		}
+	}
+
+	return 0;
+}
+
+static void __sdt_free(const struct cpumask *cpu_map)
+{
+	struct sched_domain_topology_level *tl;
+	int j;
+
+	for (tl = sched_domain_topology; tl->init; tl++) {
+		struct sd_data *sdd = &tl->data;
+
+		for_each_cpu(j, cpu_map) {
+			kfree(*per_cpu_ptr(sdd->sd, j));
+			kfree(*per_cpu_ptr(sdd->sg, j));
+		}
+		free_percpu(sdd->sd);
+		free_percpu(sdd->sg);
+	}
+}
+
 struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
 		struct s_data *d, const struct cpumask *cpu_map,
 		struct sched_domain_attr *attr, struct sched_domain *child,
 		int cpu)
 {
-	struct sched_domain *sd = tl->init(d, cpu);
+	struct sched_domain *sd = tl->init(tl, cpu);
 	if (!sd)
 		return child;
 
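
Taken together, the setup and teardown flow after this patch is roughly
the following; an editor's simplified sketch of the call graph, with
error handling and unrelated steps omitted:

/*
 * __visit_domain_allocation_hell(d, cpu_map)
 *   -> __sdt_alloc(cpu_map)      walks sched_domain_topology up to the
 *                                NULL ->init sentinel and fills each
 *                                tl->data with percpu sched_domain and
 *                                sched_group storage; -ENOMEM is mapped
 *                                to sa_sd_storage by the caller
 *
 * build_sched_domain(tl, d, cpu_map, attr, child, cpu)
 *   -> tl->init(tl, cpu)         e.g. sd_init_CPU(); the level now hands
 *                                the init function its own storage
 *
 * __free_domain_allocs(d, sa_sd_storage, cpu_map)
 *   -> __sdt_free(cpu_map)       tears down the same per-level storage
 */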