about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorHeiko Carstens <heiko.carstens@de.ibm.com>2016-12-03 03:50:21 -0500
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2016-12-07 01:23:25 -0500
commit8c9105802235c28b03359d779cbd0557b7b66e70 (patch)
treef713b6e57cf9f2dc6af152e17a654b787eb167e7
parent30fc4ca2a8ab508d160a917b89b7e1c27f893354 (diff)
s390/numa: establish cpu to node mapping early
Initialize the cpu topology and therefore also the cpu to node mapping much earlier. Fixes this warning and subsequent crashes when using the fake numa emulation mode on s390: WARNING: CPU: 0 PID: 1 at include/linux/cpumask.h:121 select_task_rq+0xe6/0x1a8 CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.6.0-rc6-00001-ge9d867a67fd0-dirty #28 task: 00000001dd270008 ti: 00000001eccb4000 task.ti: 00000001eccb4000 Krnl PSW : 0404c00180000000 0000000000176c56 (select_task_rq+0xe6/0x1a8) R:0 T:1 IO:0 EX:0 Key:0 M:1 W:0 P:0 AS:3 CC:0 PM:0 RI:0 EA:3 Call Trace: ([<0000000000176c30>] select_task_rq+0xc0/0x1a8) ([<0000000000177d64>] try_to_wake_up+0x2e4/0x478) ([<000000000015d46c>] create_worker+0x174/0x1c0) ([<0000000000161a98>] alloc_unbound_pwq+0x360/0x438) ([<0000000000162550>] apply_wqattrs_prepare+0x200/0x2a0) ([<000000000016266a>] apply_workqueue_attrs_locked+0x7a/0xb0) ([<0000000000162af0>] apply_workqueue_attrs+0x50/0x78) ([<000000000016441c>] __alloc_workqueue_key+0x304/0x520) ([<0000000000ee3706>] default_bdi_init+0x3e/0x70) ([<0000000000100270>] do_one_initcall+0x140/0x1d8) ([<0000000000ec9da8>] kernel_init_freeable+0x220/0x2d8) ([<0000000000984a7a>] kernel_init+0x2a/0x150) ([<00000000009913fa>] kernel_thread_starter+0x6/0xc) ([<00000000009913f4>] kernel_thread_starter+0x0/0xc) Reviewed-by: Michael Holzheu <holzheu@linux.vnet.ibm.com> Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--arch/s390/include/asm/topology.h3
-rw-r--r--arch/s390/kernel/setup.c1
-rw-r--r--arch/s390/kernel/topology.c33
-rw-r--r--arch/s390/numa/mode_emu.c9
-rw-r--r--arch/s390/numa/toptree.c16
5 files changed, 44 insertions(+), 18 deletions(-)
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index bc6f45421c98..fa1bfce10370 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -23,6 +23,7 @@ struct cpu_topology_s390 {
23}; 23};
24 24
25extern struct cpu_topology_s390 cpu_topology[NR_CPUS]; 25extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
26extern cpumask_t cpus_with_topology;
26 27
27#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) 28#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
28#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id) 29#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
@@ -36,6 +37,7 @@ extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
36 37
37#define mc_capable() 1 38#define mc_capable() 1
38 39
40void topology_init_early(void);
39int topology_cpu_init(struct cpu *); 41int topology_cpu_init(struct cpu *);
40int topology_set_cpu_management(int fc); 42int topology_set_cpu_management(int fc);
41void topology_schedule_update(void); 43void topology_schedule_update(void);
@@ -45,6 +47,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
45 47
46#else /* CONFIG_SCHED_TOPOLOGY */ 48#else /* CONFIG_SCHED_TOPOLOGY */
47 49
50static inline void topology_init_early(void) { }
48static inline void topology_schedule_update(void) { } 51static inline void topology_schedule_update(void) { }
49static inline int topology_cpu_init(struct cpu *cpu) { return 0; } 52static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
50static inline void topology_expect_change(void) { } 53static inline void topology_expect_change(void) { }
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index aba3c5ce1559..adfac9f0a89f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -924,6 +924,7 @@ void __init setup_arch(char **cmdline_p)
924 cpu_init(); 924 cpu_init();
925 numa_setup(); 925 numa_setup();
926 smp_detect_cpus(); 926 smp_detect_cpus();
927 topology_init_early();
927 928
928 /* 929 /*
929 * Create kernel page tables and switch to virtual addressing. 930 * Create kernel page tables and switch to virtual addressing.
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 7169d112c91a..93dcbae1e98d 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -7,6 +7,7 @@
7#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 7#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
8 8
9#include <linux/workqueue.h> 9#include <linux/workqueue.h>
10#include <linux/bootmem.h>
10#include <linux/cpuset.h> 11#include <linux/cpuset.h>
11#include <linux/device.h> 12#include <linux/device.h>
12#include <linux/export.h> 13#include <linux/export.h>
@@ -51,6 +52,8 @@ static struct mask_info drawer_info;
51struct cpu_topology_s390 cpu_topology[NR_CPUS]; 52struct cpu_topology_s390 cpu_topology[NR_CPUS];
52EXPORT_SYMBOL_GPL(cpu_topology); 53EXPORT_SYMBOL_GPL(cpu_topology);
53 54
55cpumask_t cpus_with_topology;
56
54static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) 57static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
55{ 58{
56 cpumask_t mask; 59 cpumask_t mask;
@@ -106,6 +109,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
106 cpumask_set_cpu(lcpu + i, &drawer->mask); 109 cpumask_set_cpu(lcpu + i, &drawer->mask);
107 cpumask_set_cpu(lcpu + i, &book->mask); 110 cpumask_set_cpu(lcpu + i, &book->mask);
108 cpumask_set_cpu(lcpu + i, &socket->mask); 111 cpumask_set_cpu(lcpu + i, &socket->mask);
112 cpumask_set_cpu(lcpu + i, &cpus_with_topology);
109 smp_cpu_set_polarization(lcpu + i, tl_core->pp); 113 smp_cpu_set_polarization(lcpu + i, tl_core->pp);
110 } 114 }
111 } 115 }
@@ -231,6 +235,8 @@ static void update_cpu_masks(void)
231 topo->socket_id = cpu; 235 topo->socket_id = cpu;
232 topo->book_id = cpu; 236 topo->book_id = cpu;
233 topo->drawer_id = cpu; 237 topo->drawer_id = cpu;
238 if (cpu_present(cpu))
239 cpumask_set_cpu(cpu, &cpus_with_topology);
234 } 240 }
235 } 241 }
236 numa_update_cpu_topology(); 242 numa_update_cpu_topology();
@@ -241,12 +247,12 @@ void store_topology(struct sysinfo_15_1_x *info)
241 stsi(info, 15, 1, min(topology_max_mnest, 4)); 247 stsi(info, 15, 1, min(topology_max_mnest, 4));
242} 248}
243 249
244int arch_update_cpu_topology(void) 250static int __arch_update_cpu_topology(void)
245{ 251{
246 struct sysinfo_15_1_x *info = tl_info; 252 struct sysinfo_15_1_x *info = tl_info;
247 struct device *dev; 253 int rc = 0;
248 int cpu, rc = 0;
249 254
255 cpumask_clear(&cpus_with_topology);
250 if (MACHINE_HAS_TOPOLOGY) { 256 if (MACHINE_HAS_TOPOLOGY) {
251 rc = 1; 257 rc = 1;
252 store_topology(info); 258 store_topology(info);
@@ -255,6 +261,15 @@ int arch_update_cpu_topology(void)
255 update_cpu_masks(); 261 update_cpu_masks();
256 if (!MACHINE_HAS_TOPOLOGY) 262 if (!MACHINE_HAS_TOPOLOGY)
257 topology_update_polarization_simple(); 263 topology_update_polarization_simple();
264 return rc;
265}
266
267int arch_update_cpu_topology(void)
268{
269 struct device *dev;
270 int cpu, rc;
271
272 rc = __arch_update_cpu_topology();
258 for_each_online_cpu(cpu) { 273 for_each_online_cpu(cpu) {
259 dev = get_cpu_device(cpu); 274 dev = get_cpu_device(cpu);
260 kobject_uevent(&dev->kobj, KOBJ_CHANGE); 275 kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -438,20 +453,20 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
438 nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; 453 nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
439 nr_masks = max(nr_masks, 1); 454 nr_masks = max(nr_masks, 1);
440 for (i = 0; i < nr_masks; i++) { 455 for (i = 0; i < nr_masks; i++) {
441 mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL); 456 mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
442 mask = mask->next; 457 mask = mask->next;
443 } 458 }
444} 459}
445 460
446static int __init s390_topology_init(void) 461void __init topology_init_early(void)
447{ 462{
448 struct sysinfo_15_1_x *info; 463 struct sysinfo_15_1_x *info;
449 int i; 464 int i;
450 465
451 set_sched_topology(s390_topology); 466 set_sched_topology(s390_topology);
452 if (!MACHINE_HAS_TOPOLOGY) 467 if (!MACHINE_HAS_TOPOLOGY)
453 return 0; 468 goto out;
454 tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL); 469 tl_info = memblock_virt_alloc(sizeof(*tl_info), PAGE_SIZE);
455 info = tl_info; 470 info = tl_info;
456 store_topology(info); 471 store_topology(info);
457 pr_info("The CPU configuration topology of the machine is:"); 472 pr_info("The CPU configuration topology of the machine is:");
@@ -461,9 +476,9 @@ static int __init s390_topology_init(void)
461 alloc_masks(info, &socket_info, 1); 476 alloc_masks(info, &socket_info, 1);
462 alloc_masks(info, &book_info, 2); 477 alloc_masks(info, &book_info, 2);
463 alloc_masks(info, &drawer_info, 3); 478 alloc_masks(info, &drawer_info, 3);
464 return 0; 479out:
480 __arch_update_cpu_topology();
465} 481}
466early_initcall(s390_topology_init);
467 482
468static int __init topology_init(void) 483static int __init topology_init(void)
469{ 484{
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 2ed27e8eb4d4..02b840d8f9af 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -21,6 +21,7 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/cpumask.h> 22#include <linux/cpumask.h>
23#include <linux/memblock.h> 23#include <linux/memblock.h>
24#include <linux/bootmem.h>
24#include <linux/node.h> 25#include <linux/node.h>
25#include <linux/memory.h> 26#include <linux/memory.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
@@ -307,13 +308,11 @@ fail:
307/* 308/*
308 * Allocate and initialize core to node mapping 309 * Allocate and initialize core to node mapping
309 */ 310 */
310static void create_core_to_node_map(void) 311static void __ref create_core_to_node_map(void)
311{ 312{
312 int i; 313 int i;
313 314
314 emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL); 315 emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8);
315 if (emu_cores == NULL)
316 panic("Could not allocate cores to node memory");
317 for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++) 316 for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
318 emu_cores->to_node_id[i] = NODE_ID_FREE; 317 emu_cores->to_node_id[i] = NODE_ID_FREE;
319} 318}
@@ -354,7 +353,7 @@ static struct toptree *toptree_from_topology(void)
354 353
355 phys = toptree_new(TOPTREE_ID_PHYS, 1); 354 phys = toptree_new(TOPTREE_ID_PHYS, 1);
356 355
357 for_each_online_cpu(cpu) { 356 for_each_cpu(cpu, &cpus_with_topology) {
358 top = &cpu_topology[cpu]; 357 top = &cpu_topology[cpu];
359 node = toptree_get_child(phys, 0); 358 node = toptree_get_child(phys, 0);
360 drawer = toptree_get_child(node, top->drawer_id); 359 drawer = toptree_get_child(node, top->drawer_id);
diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c
index 902d350d859a..26f622b1cd11 100644
--- a/arch/s390/numa/toptree.c
+++ b/arch/s390/numa/toptree.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/bootmem.h>
10#include <linux/cpumask.h> 11#include <linux/cpumask.h>
11#include <linux/list.h> 12#include <linux/list.h>
12#include <linux/list_sort.h> 13#include <linux/list_sort.h>
@@ -25,10 +26,14 @@
25 * RETURNS: 26 * RETURNS:
26 * Pointer to the new tree node or NULL on error 27 * Pointer to the new tree node or NULL on error
27 */ 28 */
28struct toptree *toptree_alloc(int level, int id) 29struct toptree __ref *toptree_alloc(int level, int id)
29{ 30{
30 struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL); 31 struct toptree *res;
31 32
33 if (slab_is_available())
34 res = kzalloc(sizeof(*res), GFP_KERNEL);
35 else
36 res = memblock_virt_alloc(sizeof(*res), 8);
32 if (!res) 37 if (!res)
33 return res; 38 return res;
34 39
@@ -65,7 +70,7 @@ static void toptree_remove(struct toptree *cand)
65 * cleanly using toptree_remove. Possible children are freed 70 * cleanly using toptree_remove. Possible children are freed
66 * recursively. In the end @cand itself is freed. 71 * recursively. In the end @cand itself is freed.
67 */ 72 */
68void toptree_free(struct toptree *cand) 73void __ref toptree_free(struct toptree *cand)
69{ 74{
70 struct toptree *child, *tmp; 75 struct toptree *child, *tmp;
71 76
@@ -73,7 +78,10 @@ void toptree_free(struct toptree *cand)
73 toptree_remove(cand); 78 toptree_remove(cand);
74 toptree_for_each_child_safe(child, tmp, cand) 79 toptree_for_each_child_safe(child, tmp, cand)
75 toptree_free(child); 80 toptree_free(child);
76 kfree(cand); 81 if (slab_is_available())
82 kfree(cand);
83 else
84 memblock_free_early((unsigned long)cand, sizeof(*cand));
77} 85}
78 86
79/** 87/**