Diffstat:
-rw-r--r--  arch/s390/include/asm/topology.h |  3 +++
-rw-r--r--  arch/s390/kernel/setup.c         |  1 +
-rw-r--r--  arch/s390/kernel/topology.c      | 33 ++++++++++++++++++++-------
-rw-r--r--  arch/s390/numa/mode_emu.c        |  9 ++++-----
-rw-r--r--  arch/s390/numa/toptree.c         | 16 ++++++++++----
5 files changed, 44 insertions(+), 18 deletions(-)
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index bc6f45421c98..fa1bfce10370 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -23,6 +23,7 @@ struct cpu_topology_s390 {
 };
 
 extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+extern cpumask_t cpus_with_topology;
 
 #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
 #define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
@@ -36,6 +37,7 @@ extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
 
 #define mc_capable() 1
 
+void topology_init_early(void);
 int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
@@ -45,6 +47,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #else /* CONFIG_SCHED_TOPOLOGY */
 
+static inline void topology_init_early(void) { }
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
 static inline void topology_expect_change(void) { }
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index aba3c5ce1559..adfac9f0a89f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -924,6 +924,7 @@ void __init setup_arch(char **cmdline_p)
 	cpu_init();
 	numa_setup();
 	smp_detect_cpus();
+	topology_init_early();
 
 	/*
 	 * Create kernel page tables and switch to virtual addressing.
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 7169d112c91a..93dcbae1e98d 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -7,6 +7,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/workqueue.h>
+#include <linux/bootmem.h>
 #include <linux/cpuset.h>
 #include <linux/device.h>
 #include <linux/export.h>
@@ -51,6 +52,8 @@ static struct mask_info drawer_info;
 struct cpu_topology_s390 cpu_topology[NR_CPUS];
 EXPORT_SYMBOL_GPL(cpu_topology);
 
+cpumask_t cpus_with_topology;
+
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
 	cpumask_t mask;
@@ -106,6 +109,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
 			cpumask_set_cpu(lcpu + i, &drawer->mask);
 			cpumask_set_cpu(lcpu + i, &book->mask);
 			cpumask_set_cpu(lcpu + i, &socket->mask);
+			cpumask_set_cpu(lcpu + i, &cpus_with_topology);
 			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
 		}
 	}
@@ -231,6 +235,8 @@ static void update_cpu_masks(void)
 			topo->socket_id = cpu;
 			topo->book_id = cpu;
 			topo->drawer_id = cpu;
+			if (cpu_present(cpu))
+				cpumask_set_cpu(cpu, &cpus_with_topology);
 		}
 	}
 	numa_update_cpu_topology();
@@ -241,12 +247,12 @@ void store_topology(struct sysinfo_15_1_x *info)
 	stsi(info, 15, 1, min(topology_max_mnest, 4));
 }
 
-int arch_update_cpu_topology(void)
+static int __arch_update_cpu_topology(void)
 {
 	struct sysinfo_15_1_x *info = tl_info;
-	struct device *dev;
-	int cpu, rc = 0;
+	int rc = 0;
 
+	cpumask_clear(&cpus_with_topology);
 	if (MACHINE_HAS_TOPOLOGY) {
 		rc = 1;
 		store_topology(info);
@@ -255,6 +261,15 @@ int arch_update_cpu_topology(void)
 	update_cpu_masks();
 	if (!MACHINE_HAS_TOPOLOGY)
 		topology_update_polarization_simple();
+	return rc;
+}
+
+int arch_update_cpu_topology(void)
+{
+	struct device *dev;
+	int cpu, rc;
+
+	rc = __arch_update_cpu_topology();
 	for_each_online_cpu(cpu) {
 		dev = get_cpu_device(cpu);
 		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -438,20 +453,20 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
 		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
 	nr_masks = max(nr_masks, 1);
 	for (i = 0; i < nr_masks; i++) {
-		mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+		mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
 		mask = mask->next;
 	}
 }
 
-static int __init s390_topology_init(void)
+void __init topology_init_early(void)
 {
 	struct sysinfo_15_1_x *info;
 	int i;
 
 	set_sched_topology(s390_topology);
 	if (!MACHINE_HAS_TOPOLOGY)
-		return 0;
-	tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+		goto out;
+	tl_info = memblock_virt_alloc(sizeof(*tl_info), PAGE_SIZE);
 	info = tl_info;
 	store_topology(info);
 	pr_info("The CPU configuration topology of the machine is:");
@@ -461,9 +476,9 @@ static int __init s390_topology_init(void)
 	alloc_masks(info, &socket_info, 1);
 	alloc_masks(info, &book_info, 2);
 	alloc_masks(info, &drawer_info, 3);
-	return 0;
+out:
+	__arch_update_cpu_topology();
 }
-early_initcall(s390_topology_init);
 
 static int __init topology_init(void)
 {
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 2ed27e8eb4d4..02b840d8f9af 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/cpumask.h>
 #include <linux/memblock.h>
+#include <linux/bootmem.h>
 #include <linux/node.h>
 #include <linux/memory.h>
 #include <linux/slab.h>
@@ -307,13 +308,11 @@ fail:
 /*
  * Allocate and initialize core to node mapping
  */
-static void create_core_to_node_map(void)
+static void __ref create_core_to_node_map(void)
 {
 	int i;
 
-	emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL);
-	if (emu_cores == NULL)
-		panic("Could not allocate cores to node memory");
+	emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8);
 	for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
 		emu_cores->to_node_id[i] = NODE_ID_FREE;
 }
@@ -354,7 +353,7 @@ static struct toptree *toptree_from_topology(void)
 
 	phys = toptree_new(TOPTREE_ID_PHYS, 1);
 
-	for_each_online_cpu(cpu) {
+	for_each_cpu(cpu, &cpus_with_topology) {
 		top = &cpu_topology[cpu];
 		node = toptree_get_child(phys, 0);
 		drawer = toptree_get_child(node, top->drawer_id);
diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c
index 902d350d859a..26f622b1cd11 100644
--- a/arch/s390/numa/toptree.c
+++ b/arch/s390/numa/toptree.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/bootmem.h>
 #include <linux/cpumask.h>
 #include <linux/list.h>
 #include <linux/list_sort.h>
@@ -25,10 +26,14 @@
  * RETURNS:
  * Pointer to the new tree node or NULL on error
  */
-struct toptree *toptree_alloc(int level, int id)
+struct toptree __ref *toptree_alloc(int level, int id)
 {
-	struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL);
+	struct toptree *res;
 
+	if (slab_is_available())
+		res = kzalloc(sizeof(*res), GFP_KERNEL);
+	else
+		res = memblock_virt_alloc(sizeof(*res), 8);
 	if (!res)
 		return res;
 
@@ -65,7 +70,7 @@ static void toptree_remove(struct toptree *cand)
 * cleanly using toptree_remove. Possible children are freed
 * recursively. In the end @cand itself is freed.
 */
-void toptree_free(struct toptree *cand)
+void __ref toptree_free(struct toptree *cand)
 {
 	struct toptree *child, *tmp;
 
@@ -73,7 +78,10 @@ void toptree_free(struct toptree *cand)
 	toptree_remove(cand);
 	toptree_for_each_child_safe(child, tmp, cand)
 		toptree_free(child);
-	kfree(cand);
+	if (slab_is_available())
+		kfree(cand);
+	else
+		memblock_free_early((unsigned long)cand, sizeof(*cand));
 }
 
 /**