path: root/arch/s390/kernel/topology.c
Diffstat (limited to 'arch/s390/kernel/topology.c')
-rw-r--r--	arch/s390/kernel/topology.c	35
1 file changed, 31 insertions, 4 deletions
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 12b39b3d9c38..661a07217057 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -9,6 +9,7 @@
 #include <linux/device.h>
 #include <linux/bootmem.h>
 #include <linux/sched.h>
+#include <linux/kthread.h>
 #include <linux/workqueue.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
@@ -66,6 +67,8 @@ static struct timer_list topology_timer;
 static void set_topology_timer(void);
 static DECLARE_WORK(topology_work, topology_work_fn);
 
+cpumask_t cpu_core_map[NR_CPUS];
+
 cpumask_t cpu_coregroup_map(unsigned int cpu)
 {
 	struct core_info *core = &core_info;
@@ -199,6 +202,14 @@ int topology_set_cpu_management(int fc)
 	return rc;
 }
 
+static void update_cpu_core_map(void)
+{
+	int cpu;
+
+	for_each_present_cpu(cpu)
+		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+}
+
 void arch_update_cpu_topology(void)
 {
 	struct tl_info *info = tl_info;
@@ -206,20 +217,33 @@ void arch_update_cpu_topology(void)
 	int cpu;
 
 	if (!machine_has_topology) {
+		update_cpu_core_map();
 		topology_update_polarization_simple();
 		return;
 	}
 	stsi(info, 15, 1, 2);
 	tl_to_cores(info);
+	update_cpu_core_map();
 	for_each_online_cpu(cpu) {
 		sysdev = get_cpu_sysdev(cpu);
 		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
 	}
 }
 
-static void topology_work_fn(struct work_struct *work)
+static int topology_kthread(void *data)
 {
 	arch_reinit_sched_domains();
+	return 0;
+}
+
+static void topology_work_fn(struct work_struct *work)
+{
+	/* We can't call arch_reinit_sched_domains() from a multi-threaded
+	 * workqueue context since it may deadlock in case of cpu hotplug.
+	 * So we have to create a kernel thread in order to call
+	 * arch_reinit_sched_domains().
+	 */
+	kthread_run(topology_kthread, NULL, "topology_update");
 }
 
 void topology_schedule_update(void)
@@ -251,20 +275,23 @@ static int __init init_topology_update(void)
 {
 	int rc;
 
+	rc = 0;
 	if (!machine_has_topology) {
 		topology_update_polarization_simple();
-		return 0;
+		goto out;
 	}
 	init_timer_deferrable(&topology_timer);
 	if (machine_has_topology_irq) {
 		rc = register_external_interrupt(0x2005, topology_interrupt);
 		if (rc)
-			return rc;
+			goto out;
 		ctl_set_bit(0, 8);
 	}
 	else
 		set_topology_timer();
-	return 0;
+out:
+	update_cpu_core_map();
+	return rc;
 }
 __initcall(init_topology_update);
 
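The comment added to topology_work_fn() above carries the key idea of the patch: arch_reinit_sched_domains() may have to wait on cpu hotplug, so calling it directly from the multi-threaded workqueue can deadlock, and the work item therefore only spawns a short-lived kernel thread that makes the call. The fragment below is a minimal sketch of that same deferral pattern, written as a standalone module rather than s390 arch code; it is not part of the patch, and identifiers such as demo_work_fn() and rebuild_domains_stub() are hypothetical placeholders.

/*
 * Sketch of the "defer a blocking call from a work item to a kthread"
 * pattern.  rebuild_domains_stub() stands in for a call such as
 * arch_reinit_sched_domains() that must not run from the workqueue itself.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

static void demo_work_fn(struct work_struct *work);
static DECLARE_WORK(demo_work, demo_work_fn);

/* Placeholder for the potentially blocking operation. */
static void rebuild_domains_stub(void)
{
	printk(KERN_INFO "demo: rebuilding sched domains (stub)\n");
}

static int demo_kthread(void *data)
{
	rebuild_domains_stub();
	return 0;
}

static void demo_work_fn(struct work_struct *work)
{
	/* Hand the blocking call off to a short-lived kernel thread,
	 * just as topology_work_fn() does above. */
	kthread_run(demo_kthread, NULL, "demo_update");
}

static int __init demo_init(void)
{
	schedule_work(&demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Waits for the work item only; a real driver would also make sure
	 * the spawned kthread has finished before unloading.  That extra
	 * synchronization is omitted here for brevity (the s390 code is
	 * built in and never unloaded, so it does not need it either). */
	flush_scheduled_work();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");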