aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHeiko Carstens <heiko.carstens@de.ibm.com>2016-05-25 03:53:07 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2016-06-13 09:58:27 -0400
commit86d18a55dd66aea8bb8fffb0334557eb4973ea52 (patch)
tree3780fbefc899aaabb7c7af0edaa94e9b5761e939
parentadac0f1e8c08548d82a48c9913ebc9787f946440 (diff)
s390/topology: remove z10 special handling
I don't have a z10 to test this anymore, so I have no idea if the code works at all or even crashes. I can try to emulate, but it is just guess work. Nor do we know if the z10 special handling is performance-wise still better than the generic handling. There have been a lot of changes to the scheduler. Therefore let's play safe and remove the special handling.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--arch/s390/kernel/topology.c64
1 file changed, 8 insertions, 56 deletions
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 44745e751c3a..e959c02e0cac 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -80,11 +80,10 @@ static cpumask_t cpu_thread_map(unsigned int cpu)
80 return mask; 80 return mask;
81} 81}
82 82
83static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core, 83static void add_cpus_to_mask(struct topology_core *tl_core,
84 struct mask_info *drawer, 84 struct mask_info *drawer,
85 struct mask_info *book, 85 struct mask_info *book,
86 struct mask_info *socket, 86 struct mask_info *socket)
87 int one_socket_per_cpu)
88{ 87{
89 struct cpu_topology_s390 *topo; 88 struct cpu_topology_s390 *topo;
90 unsigned int core; 89 unsigned int core;
@@ -101,21 +100,15 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
101 topo = &per_cpu(cpu_topology, lcpu + i); 100 topo = &per_cpu(cpu_topology, lcpu + i);
102 topo->drawer_id = drawer->id; 101 topo->drawer_id = drawer->id;
103 topo->book_id = book->id; 102 topo->book_id = book->id;
103 topo->socket_id = socket->id;
104 topo->core_id = rcore; 104 topo->core_id = rcore;
105 topo->thread_id = lcpu + i; 105 topo->thread_id = lcpu + i;
106 cpumask_set_cpu(lcpu + i, &drawer->mask); 106 cpumask_set_cpu(lcpu + i, &drawer->mask);
107 cpumask_set_cpu(lcpu + i, &book->mask); 107 cpumask_set_cpu(lcpu + i, &book->mask);
108 cpumask_set_cpu(lcpu + i, &socket->mask); 108 cpumask_set_cpu(lcpu + i, &socket->mask);
109 if (one_socket_per_cpu)
110 topo->socket_id = rcore;
111 else
112 topo->socket_id = socket->id;
113 smp_cpu_set_polarization(lcpu + i, tl_core->pp); 109 smp_cpu_set_polarization(lcpu + i, tl_core->pp);
114 } 110 }
115 if (one_socket_per_cpu)
116 socket = socket->next;
117 } 111 }
118 return socket;
119} 112}
120 113
121static void clear_masks(void) 114static void clear_masks(void)
@@ -146,13 +139,14 @@ static union topology_entry *next_tle(union topology_entry *tle)
146 return (union topology_entry *)((struct topology_container *)tle + 1); 139 return (union topology_entry *)((struct topology_container *)tle + 1);
147} 140}
148 141
149static void __tl_to_masks_generic(struct sysinfo_15_1_x *info) 142static void tl_to_masks(struct sysinfo_15_1_x *info)
150{ 143{
151 struct mask_info *socket = &socket_info; 144 struct mask_info *socket = &socket_info;
152 struct mask_info *book = &book_info; 145 struct mask_info *book = &book_info;
153 struct mask_info *drawer = &drawer_info; 146 struct mask_info *drawer = &drawer_info;
154 union topology_entry *tle, *end; 147 union topology_entry *tle, *end;
155 148
149 clear_masks();
156 tle = info->tle; 150 tle = info->tle;
157 end = (union topology_entry *)((unsigned long)info + info->length); 151 end = (union topology_entry *)((unsigned long)info + info->length);
158 while (tle < end) { 152 while (tle < end) {
@@ -170,33 +164,7 @@ static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
170 socket->id = tle->container.id; 164 socket->id = tle->container.id;
171 break; 165 break;
172 case 0: 166 case 0:
173 add_cpus_to_mask(&tle->cpu, drawer, book, socket, 0); 167 add_cpus_to_mask(&tle->cpu, drawer, book, socket);
174 break;
175 default:
176 clear_masks();
177 return;
178 }
179 tle = next_tle(tle);
180 }
181}
182
183static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
184{
185 struct mask_info *socket = &socket_info;
186 struct mask_info *book = &book_info;
187 struct mask_info *drawer = &drawer_info;
188 union topology_entry *tle, *end;
189
190 tle = info->tle;
191 end = (union topology_entry *)((unsigned long)info + info->length);
192 while (tle < end) {
193 switch (tle->nl) {
194 case 1:
195 book = book->next;
196 book->id = tle->container.id;
197 break;
198 case 0:
199 socket = add_cpus_to_mask(&tle->cpu, drawer, book, socket, 1);
200 break; 168 break;
201 default: 169 default:
202 clear_masks(); 170 clear_masks();
@@ -206,22 +174,6 @@ static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
206 } 174 }
207} 175}
208 176
209static void tl_to_masks(struct sysinfo_15_1_x *info)
210{
211 struct cpuid cpu_id;
212
213 get_cpu_id(&cpu_id);
214 clear_masks();
215 switch (cpu_id.machine) {
216 case 0x2097:
217 case 0x2098:
218 __tl_to_masks_z10(info);
219 break;
220 default:
221 __tl_to_masks_generic(info);
222 }
223}
224
225static void topology_update_polarization_simple(void) 177static void topology_update_polarization_simple(void)
226{ 178{
227 int cpu; 179 int cpu;