author    Heiko Carstens <heiko.carstens@de.ibm.com>    2016-12-02 04:38:37 -0500
committer Martin Schwidefsky <schwidefsky@de.ibm.com>   2016-12-07 01:23:16 -0500
commit    30fc4ca2a8ab508d160a917b89b7e1c27f893354
tree      90b80934e4e7189eef1ef0b3f4c17eb5f7af9588 /arch/s390
parent    af51160ebd3cc1c8bf0d37a48f13ac0dbe8a6e5f
s390/topology: use cpu_topology array instead of per cpu variable
CPU topology information, such as the cpu to node mapping, must already be set up in setup_arch. Topology information is currently made available through a per-cpu variable; this will not work once the initialization is moved to setup_arch, since the generic percpu setup happens much later. Therefore convert back to a cpu_topology array.

Reviewed-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
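For context, the sketch below contrasts the two access patterns the patch converts between. It is illustrative only: the trimmed-down struct and the early_store_node() helper are invented for the example and are not code from this patch; only cpu_topology[NR_CPUS] and the per_cpu() form mirror what the diff actually changes.

/*
 * Illustrative sketch: why a statically sized array can be written in
 * setup_arch() while a per-cpu variable cannot.
 */
#include <linux/threads.h>	/* NR_CPUS */

struct cpu_topology_s390 {
	unsigned short node_id;
	/* remaining ids and cpumask_t members omitted for brevity */
};

/* After the patch: one statically allocated slot per possible CPU. */
struct cpu_topology_s390 cpu_topology[NR_CPUS];

/* Hypothetical helper, callable from setup_arch(): plain array indexing
 * needs no per-cpu offsets, so it works this early in boot. */
static void early_store_node(int cpu, unsigned short node)
{
	cpu_topology[cpu].node_id = node;
}

/*
 * Before the patch the same store would have been
 *	per_cpu(cpu_topology, cpu).node_id = node;
 * which depends on per-cpu offsets that only exist after the generic
 * per-cpu areas have been set up, i.e. later than setup_arch().
 */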
Diffstat (limited to 'arch/s390')
 arch/s390/include/asm/topology.h | 25
 arch/s390/kernel/topology.c      | 18
 arch/s390/numa/mode_emu.c        |  4
 3 files changed, 23 insertions(+), 24 deletions(-)
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index f15f5571ca2b..bc6f45421c98 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -22,18 +22,17 @@ struct cpu_topology_s390 {
 	cpumask_t drawer_mask;
 };
 
-DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
+extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
 
-#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
-#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
-#define topology_sibling_cpumask(cpu) \
-	(&per_cpu(cpu_topology, cpu).thread_mask)
-#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
-#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
-#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
-#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
-#define topology_drawer_id(cpu) (per_cpu(cpu_topology, cpu).drawer_id)
-#define topology_drawer_cpumask(cpu) (&per_cpu(cpu_topology, cpu).drawer_mask)
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
+#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
+#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
+#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
+#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)
+#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask)
 
 #define mc_capable() 1
 
@@ -65,7 +64,7 @@ static inline void topology_expect_change(void) { }
 #define cpu_to_node cpu_to_node
 static inline int cpu_to_node(int cpu)
 {
-	return per_cpu(cpu_topology, cpu).node_id;
+	return cpu_topology[cpu].node_id;
 }
 
 /* Returns a pointer to the cpumask of CPUs on node 'node'. */
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 8705ee66c087..7169d112c91a 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -41,15 +41,15 @@ static bool topology_enabled = true;
 static DECLARE_WORK(topology_work, topology_work_fn);
 
 /*
- * Socket/Book linked lists and per_cpu(cpu_topology) updates are
+ * Socket/Book linked lists and cpu_topology updates are
  * protected by "sched_domains_mutex".
  */
 static struct mask_info socket_info;
 static struct mask_info book_info;
 static struct mask_info drawer_info;
 
-DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -97,7 +97,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
 		if (lcpu < 0)
 			continue;
 		for (i = 0; i <= smp_cpu_mtid; i++) {
-			topo = &per_cpu(cpu_topology, lcpu + i);
+			topo = &cpu_topology[lcpu + i];
 			topo->drawer_id = drawer->id;
 			topo->book_id = book->id;
 			topo->socket_id = socket->id;
@@ -220,7 +220,7 @@ static void update_cpu_masks(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		topo = &per_cpu(cpu_topology, cpu);
+		topo = &cpu_topology[cpu];
 		topo->thread_mask = cpu_thread_map(cpu);
 		topo->core_mask = cpu_group_map(&socket_info, cpu);
 		topo->book_mask = cpu_group_map(&book_info, cpu);
@@ -394,23 +394,23 @@ int topology_cpu_init(struct cpu *cpu)
 
 static const struct cpumask *cpu_thread_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).thread_mask;
+	return &cpu_topology[cpu].thread_mask;
 }
 
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).core_mask;
+	return &cpu_topology[cpu].core_mask;
 }
 
 static const struct cpumask *cpu_book_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).book_mask;
+	return &cpu_topology[cpu].book_mask;
 }
 
 static const struct cpumask *cpu_drawer_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).drawer_mask;
+	return &cpu_topology[cpu].drawer_mask;
 }
 
 static int __init early_parse_topology(char *p)
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index b83109328fec..2ed27e8eb4d4 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -355,7 +355,7 @@ static struct toptree *toptree_from_topology(void)
 	phys = toptree_new(TOPTREE_ID_PHYS, 1);
 
 	for_each_online_cpu(cpu) {
-		top = &per_cpu(cpu_topology, cpu);
+		top = &cpu_topology[cpu];
 		node = toptree_get_child(phys, 0);
 		drawer = toptree_get_child(node, top->drawer_id);
 		book = toptree_get_child(drawer, top->book_id);
@@ -378,7 +378,7 @@ static void topology_add_core(struct toptree *core)
 	int cpu;
 
 	for_each_cpu(cpu, &core->mask) {
-		top = &per_cpu(cpu_topology, cpu);
+		top = &cpu_topology[cpu];
 		cpumask_copy(&top->thread_mask, &core->mask);
 		cpumask_copy(&top->core_mask, &core_mc(core)->mask);
 		cpumask_copy(&top->book_mask, &core_book(core)->mask);