author		Heiko Carstens <heiko.carstens@de.ibm.com>	2011-12-27 05:27:12 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2011-12-27 05:27:11 -0500
commit		4baeb964d96d38dff461af5b9d578f0a9ba67617 (patch)
tree		d6b2c708de629adac90e08fe6a999e3401ac5bbf /arch/s390
parent		f32269a0d09113b12b68f08dbc5361195176e2dc (diff)
[S390] topology: cleanup z10 topology handling
Cleanup z10 topology handling. This adds some more code but hopefully
the result is more readable and easier to maintain.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
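
[Note: the sketch below is not part of the patch.] As a rough illustration of the pattern the cleanup introduces (selecting a dedicated parser once, based on the CPU machine type, instead of branching on a z10 flag inside the parsing loop), here is a minimal standalone sketch. The types and helpers in it (struct topo_info, parse_topology_z10, parse_topology_generic) are hypothetical stand-ins, not the kernel's sysinfo_15_1_x handling:

#include <stdio.h>

/* Hypothetical stand-in for the topology information block. */
struct topo_info {
	int entries;
};

/* z10-style parsing: every CPU entry describes exactly one core. */
static void parse_topology_z10(struct topo_info *info)
{
	printf("z10 parser: %d entries, one core per CPU entry\n", info->entries);
}

/* Generic parsing: CPU entries sit below shared core containers. */
static void parse_topology_generic(struct topo_info *info)
{
	printf("generic parser: %d entries\n", info->entries);
}

/*
 * Dispatch once on the machine type, mirroring the 0x2097/0x2098
 * check that the patch moves out of the parsing loop and into
 * tl_to_cores().
 */
static void parse_topology(struct topo_info *info, unsigned int machine)
{
	switch (machine) {
	case 0x2097:
	case 0x2098:
		parse_topology_z10(info);
		break;
	default:
		parse_topology_generic(info);
	}
}

int main(void)
{
	struct topo_info info = { .entries = 4 };

	parse_topology(&info, 0x2097);	/* z10 machine type */
	parse_topology(&info, 0x1234);	/* any other machine type */
	return 0;
}

The benefit is the same as in the patch itself: each parser handles one well-defined topology layout, and the machine-type decision is made exactly once.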
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/kernel/topology.c	77
1 files changed, 48 insertions, 29 deletions
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 621f89e36c8a..2abad3014928 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -1,5 +1,5 @@
 /*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007,2011
  * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
@@ -72,7 +72,7 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
 					  struct mask_info *book,
 					  struct mask_info *core,
-					  int z10)
+					  int one_core_per_cpu)
 {
 	unsigned int cpu;
 
@@ -89,7 +89,7 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
 			cpumask_set_cpu(lcpu, &book->mask);
 			cpu_book_id[lcpu] = book->id;
 			cpumask_set_cpu(lcpu, &core->mask);
-			if (z10) {
+			if (one_core_per_cpu) {
 				cpu_core_id[lcpu] = rcpu;
 				core = core->next;
 			} else {
@@ -124,37 +124,15 @@ static union topology_entry *next_tle(union topology_entry *tle)
 	return (union topology_entry *)((struct topology_container *)tle + 1);
 }
 
-static void tl_to_cores(struct sysinfo_15_1_x *info)
+static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
 {
 	struct mask_info *core = &core_info;
 	struct mask_info *book = &book_info;
 	union topology_entry *tle, *end;
-	struct cpuid cpu_id;
-	int z10 = 0;
 
-	get_cpu_id(&cpu_id);
-	z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
-	spin_lock_irq(&topology_lock);
-	clear_masks();
 	tle = info->tle;
 	end = (union topology_entry *)((unsigned long)info + info->length);
 	while (tle < end) {
-		if (z10) {
-			switch (tle->nl) {
-			case 1:
-				book = book->next;
-				book->id = tle->container.id;
-				break;
-			case 0:
-				core = add_cpus_to_mask(&tle->cpu, book, core, z10);
-				break;
-			default:
-				clear_masks();
-				goto out;
-			}
-			tle = next_tle(tle);
-			continue;
-		}
 		switch (tle->nl) {
 		case 2:
 			book = book->next;
@@ -165,15 +143,56 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
 			core->id = tle->container.id;
 			break;
 		case 0:
-			add_cpus_to_mask(&tle->cpu, book, core, z10);
+			add_cpus_to_mask(&tle->cpu, book, core, 0);
 			break;
 		default:
 			clear_masks();
-			goto out;
+			return;
 		}
 		tle = next_tle(tle);
 	}
-out:
+}
+
+static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
+{
+	struct mask_info *core = &core_info;
+	struct mask_info *book = &book_info;
+	union topology_entry *tle, *end;
+
+	tle = info->tle;
+	end = (union topology_entry *)((unsigned long)info + info->length);
+	while (tle < end) {
+		switch (tle->nl) {
+		case 1:
+			book = book->next;
+			book->id = tle->container.id;
+			break;
+		case 0:
+			core = add_cpus_to_mask(&tle->cpu, book, core, 1);
+			break;
+		default:
+			clear_masks();
+			return;
+		}
+		tle = next_tle(tle);
+	}
+}
+
+static void tl_to_cores(struct sysinfo_15_1_x *info)
+{
+	struct cpuid cpu_id;
+
+	get_cpu_id(&cpu_id);
+	spin_lock_irq(&topology_lock);
+	clear_masks();
+	switch (cpu_id.machine) {
+	case 0x2097:
+	case 0x2098:
+		__tl_to_cores_z10(info);
+		break;
+	default:
+		__tl_to_cores_generic(info);
+	}
 	spin_unlock_irq(&topology_lock);
 }
 