author    Heiko Carstens <heiko.carstens@de.ibm.com>	2015-02-04 08:48:25 -0500
committer Martin Schwidefsky <schwidefsky@de.ibm.com>	2015-02-12 03:37:22 -0500
commit    da0c636ea79380c2001f319844e9a237cf211f96
tree      d80530069364e6d89d4384234414b6a558d33249
parent    d05d15da18f521c4fb5a35b923ce33955c848d99
s390/topology: convert cpu_topology array to per cpu variable
Convert the per cpu topology cpu masks to a per cpu variable. At least for
machines that have fewer possible cpus than NR_CPUS this can save a bit of
memory (z/VM: max 64 vs 512 for performance_defconfig). This reduces the
kernel image size by 100k.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--  arch/s390/include/asm/topology.h  18
-rw-r--r--  arch/s390/kernel/topology.c       34
2 files changed, 26 insertions(+), 26 deletions(-)
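For readers less familiar with the kernel's per-cpu machinery, here is a minimal sketch of the pattern this patch switches to. The struct fields mirror those visible in the diff below, but the field types and the helper example_socket_id() are assumptions added purely for illustration; this is not the actual s390 source.

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Fields as seen in the diff; the exact types here are assumed. */
struct cpu_topology_s390 {
	unsigned short thread_id;
	unsigned short core_id;
	unsigned short socket_id;
	unsigned short book_id;
	cpumask_t thread_mask;
	cpumask_t core_mask;
	cpumask_t book_mask;
};

/*
 * Before: one statically sized array entry for every CPU index up to
 * NR_CPUS, i.e. NR_CPUS * sizeof(struct cpu_topology_s390) bytes built
 * into the kernel image.
 *
 *	struct cpu_topology_s390 cpu_topology[NR_CPUS];
 *
 * After: a per-cpu variable. The image carries only one template copy,
 * and backing storage is instantiated per possible CPU at boot, so a
 * z/VM guest with 64 possible cpus no longer pays for a 512-entry array.
 */
DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);

/* Accesses change from array indexing to the per_cpu() accessor. */
static int example_socket_id(int cpu)
{
	return per_cpu(cpu_topology, cpu).socket_id;
}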
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 9454231c9f23..b1453a2ae1ca 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -18,15 +18,15 @@ struct cpu_topology_s390 {
 	cpumask_t book_mask;
 };
 
-extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
 
-#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
-#define topology_thread_id(cpu)		  (cpu_topology[cpu].thread_id)
-#define topology_thread_cpumask(cpu)	  (&cpu_topology[cpu].thread_mask)
-#define topology_core_id(cpu)		  (cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu)	  (&cpu_topology[cpu].core_mask)
-#define topology_book_id(cpu)		  (cpu_topology[cpu].book_id)
-#define topology_book_cpumask(cpu)	  (&cpu_topology[cpu].book_mask)
+#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
+#define topology_thread_id(cpu)		  (per_cpu(cpu_topology, cpu).thread_id)
+#define topology_thread_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).thread_mask)
+#define topology_core_id(cpu)		  (per_cpu(cpu_topology, cpu).core_id)
+#define topology_core_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).core_mask)
+#define topology_book_id(cpu)		  (per_cpu(cpu_topology, cpu).book_id)
+#define topology_book_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).book_mask)
 
 #define mc_capable() 1
 
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index d2303f6340ab..14da43b801d9 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -42,8 +42,8 @@ static DEFINE_SPINLOCK(topology_lock);
 static struct mask_info socket_info;
 static struct mask_info book_info;
 
-struct cpu_topology_s390 cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
+DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -90,15 +90,15 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
 		if (lcpu < 0)
 			continue;
 		for (i = 0; i <= smp_cpu_mtid; i++) {
-			cpu_topology[lcpu + i].book_id = book->id;
-			cpu_topology[lcpu + i].core_id = rcore;
-			cpu_topology[lcpu + i].thread_id = lcpu + i;
+			per_cpu(cpu_topology, lcpu + i).book_id = book->id;
+			per_cpu(cpu_topology, lcpu + i).core_id = rcore;
+			per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i;
 			cpumask_set_cpu(lcpu + i, &book->mask);
 			cpumask_set_cpu(lcpu + i, &socket->mask);
 			if (one_socket_per_cpu)
-				cpu_topology[lcpu + i].socket_id = rcore;
+				per_cpu(cpu_topology, lcpu + i).socket_id = rcore;
 			else
-				cpu_topology[lcpu + i].socket_id = socket->id;
+				per_cpu(cpu_topology, lcpu + i).socket_id = socket->id;
 			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
 		}
 		if (one_socket_per_cpu)
@@ -249,14 +249,14 @@ static void update_cpu_masks(void)
 
 	spin_lock_irqsave(&topology_lock, flags);
 	for_each_possible_cpu(cpu) {
-		cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
-		cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
-		cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
+		per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu);
+		per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu);
+		per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu);
 		if (!MACHINE_HAS_TOPOLOGY) {
-			cpu_topology[cpu].thread_id = cpu;
-			cpu_topology[cpu].core_id = cpu;
-			cpu_topology[cpu].socket_id = cpu;
-			cpu_topology[cpu].book_id = cpu;
+			per_cpu(cpu_topology, cpu).thread_id = cpu;
+			per_cpu(cpu_topology, cpu).core_id = cpu;
+			per_cpu(cpu_topology, cpu).socket_id = cpu;
+			per_cpu(cpu_topology, cpu).book_id = cpu;
 		}
 	}
 	spin_unlock_irqrestore(&topology_lock, flags);
@@ -423,18 +423,18 @@ int topology_cpu_init(struct cpu *cpu)
 
 const struct cpumask *cpu_thread_mask(int cpu)
 {
-	return &cpu_topology[cpu].thread_mask;
+	return &per_cpu(cpu_topology, cpu).thread_mask;
 }
 
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-	return &cpu_topology[cpu].core_mask;
+	return &per_cpu(cpu_topology, cpu).core_mask;
 }
 
 static const struct cpumask *cpu_book_mask(int cpu)
 {
-	return &cpu_topology[cpu].book_mask;
+	return &per_cpu(cpu_topology, cpu).book_mask;
 }
 
 static int __init early_parse_topology(char *p)