author    Heiko Carstens <heiko.carstens@de.ibm.com>    2012-11-12 04:03:25 -0500
committer Martin Schwidefsky <schwidefsky@de.ibm.com>   2012-11-23 05:14:31 -0500
commit    d1e57508fbd63b340788afe6f2c74a608603e714 (patch)
tree      a4f32399d06ea644f3a6cb6f02b87ad9082b3bb6 /arch/s390
parent    0a4ccc992978ef552dc86ac68bc1ec62cf268e2a (diff)
s390/topology: cleanup topology code
Mainly merge all different per-cpu arrays into a single array which holds all topology information per logical cpu. Also fix the broken core vs socket variable naming and simplify the locking a bit.

When running in environments without topology information, also invent book, socket and core ids, so that not all ids are zero.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
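The core of the cleanup is a data-structure change: the five parallel per-cpu arrays are folded into one array of structs, and the topology_* accessor macros index that array instead. A simplified sketch of the new layout (excerpted from the asm/topology.h hunk below; not a standalone translation unit):

/* Before: one array per topology attribute */
unsigned char cpu_core_id[NR_CPUS];
unsigned char cpu_socket_id[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];
cpumask_t cpu_core_map[NR_CPUS];
cpumask_t cpu_book_map[NR_CPUS];

/* After: one entry per logical cpu holding all topology information */
struct cpu_topology_s390 {
	unsigned short core_id;
	unsigned short socket_id;
	unsigned short book_id;
	cpumask_t core_mask;
	cpumask_t book_mask;
};
struct cpu_topology_s390 cpu_topology[NR_CPUS];

/* Accessors read a field of the per-cpu entry, e.g.: */
#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)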
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/topology.h |  34
-rw-r--r--  arch/s390/kernel/topology.c      | 113
2 files changed, 72 insertions(+), 75 deletions(-)
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 9935cbd6a46f..05425b18c0aa 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -8,32 +8,34 @@ struct cpu;
 
 #ifdef CONFIG_SCHED_BOOK
 
-extern unsigned char cpu_socket_id[NR_CPUS];
-#define topology_physical_package_id(cpu) (cpu_socket_id[cpu])
+struct cpu_topology_s390 {
+	unsigned short core_id;
+	unsigned short socket_id;
+	unsigned short book_id;
+	cpumask_t core_mask;
+	cpumask_t book_mask;
+};
+
+extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
+#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
+#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
 
-extern unsigned char cpu_core_id[NR_CPUS];
-extern cpumask_t cpu_core_map[NR_CPUS];
+#define mc_capable() 1
 
 static inline const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-	return &cpu_core_map[cpu];
+	return &cpu_topology[cpu].core_mask;
 }
 
-#define topology_core_id(cpu) (cpu_core_id[cpu])
-#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
-#define mc_capable() (1)
-
-extern unsigned char cpu_book_id[NR_CPUS];
-extern cpumask_t cpu_book_map[NR_CPUS];
-
 static inline const struct cpumask *cpu_book_mask(int cpu)
 {
-	return &cpu_book_map[cpu];
+	return &cpu_topology[cpu].book_mask;
 }
 
-#define topology_book_id(cpu) (cpu_book_id[cpu])
-#define topology_book_cpumask(cpu) (&cpu_book_map[cpu])
-
 int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
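For illustration, generic code consumes the new layout only through these macros; a minimal hypothetical sketch (dump_cpu_topology() is invented for this example, while for_each_cpu() and the topology_* macros are the interfaces defined above):

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/topology.h>

/* Hypothetical helper: report where a logical cpu sits in the machine. */
static void dump_cpu_topology(int cpu)
{
	int sibling;

	pr_info("cpu %d: core %d, socket %d, book %d\n",
		cpu, topology_core_id(cpu),
		topology_physical_package_id(cpu),
		topology_book_id(cpu));

	/* core_mask holds all logical cpus of the same socket. */
	for_each_cpu(sibling, topology_core_cpumask(cpu))
		pr_info("  shares a socket with cpu %d\n", sibling);
}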
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index dd55f7c20104..f1aba87cceb8 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -29,48 +29,38 @@ struct mask_info {
 	cpumask_t mask;
 };
 
-static int topology_enabled = 1;
+static void set_topology_timer(void);
 static void topology_work_fn(struct work_struct *work);
 static struct sysinfo_15_1_x *tl_info;
-static void set_topology_timer(void);
-static DECLARE_WORK(topology_work, topology_work_fn);
-/* topology_lock protects the core linked list */
-static DEFINE_SPINLOCK(topology_lock);
 
-static struct mask_info core_info;
-cpumask_t cpu_core_map[NR_CPUS];
-unsigned char cpu_core_id[NR_CPUS];
-unsigned char cpu_socket_id[NR_CPUS];
+static int topology_enabled = 1;
+static DECLARE_WORK(topology_work, topology_work_fn);
 
+/* topology_lock protects the socket and book linked lists */
+static DEFINE_SPINLOCK(topology_lock);
+static struct mask_info socket_info;
 static struct mask_info book_info;
-cpumask_t cpu_book_map[NR_CPUS];
-unsigned char cpu_book_id[NR_CPUS];
+
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
 	cpumask_t mask;
 
-	cpumask_clear(&mask);
-	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
-		cpumask_copy(&mask, cpumask_of(cpu));
+	cpumask_copy(&mask, cpumask_of(cpu));
+	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
 		return mask;
+	for (; info; info = info->next) {
+		if (cpumask_test_cpu(cpu, &info->mask))
+			return info->mask;
 	}
-	while (info) {
-		if (cpumask_test_cpu(cpu, &info->mask)) {
-			mask = info->mask;
-			break;
-		}
-		info = info->next;
-	}
-	if (cpumask_empty(&mask))
-		cpumask_copy(&mask, cpumask_of(cpu));
 	return mask;
 }
 
 static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
 					  struct mask_info *book,
-					  struct mask_info *core,
-					  int one_core_per_cpu)
+					  struct mask_info *socket,
+					  int one_socket_per_cpu)
 {
 	unsigned int cpu;
 
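Stripped of diff markers, the post-patch cpu_group_map() reads as follows (reassembled from the hunk above; whitespace approximate):

static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	/* Default to a mask containing only this cpu ... */
	cpumask_copy(&mask, cpumask_of(cpu));
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
		return mask;
	/* ... otherwise return the first list entry that contains the cpu. */
	for (; info; info = info->next) {
		if (cpumask_test_cpu(cpu, &info->mask))
			return info->mask;
	}
	return mask;
}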
@@ -80,28 +70,28 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
 
 		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
 		lcpu = smp_find_processor_id(rcpu);
-		if (lcpu >= 0) {
-			cpumask_set_cpu(lcpu, &book->mask);
-			cpu_book_id[lcpu] = book->id;
-			cpumask_set_cpu(lcpu, &core->mask);
-			cpu_core_id[lcpu] = rcpu;
-			if (one_core_per_cpu) {
-				cpu_socket_id[lcpu] = rcpu;
-				core = core->next;
-			} else {
-				cpu_socket_id[lcpu] = core->id;
-			}
-			smp_cpu_set_polarization(lcpu, tl_cpu->pp);
+		if (lcpu < 0)
+			continue;
+		cpumask_set_cpu(lcpu, &book->mask);
+		cpu_topology[lcpu].book_id = book->id;
+		cpumask_set_cpu(lcpu, &socket->mask);
+		cpu_topology[lcpu].core_id = rcpu;
+		if (one_socket_per_cpu) {
+			cpu_topology[lcpu].socket_id = rcpu;
+			socket = socket->next;
+		} else {
+			cpu_topology[lcpu].socket_id = socket->id;
 		}
+		smp_cpu_set_polarization(lcpu, tl_cpu->pp);
 	}
-	return core;
+	return socket;
 }
 
 static void clear_masks(void)
 {
 	struct mask_info *info;
 
-	info = &core_info;
+	info = &socket_info;
 	while (info) {
 		cpumask_clear(&info->mask);
 		info = info->next;
@@ -120,9 +110,9 @@ static union topology_entry *next_tle(union topology_entry *tle)
 	return (union topology_entry *)((struct topology_container *)tle + 1);
 }
 
-static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
+static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
 {
-	struct mask_info *core = &core_info;
+	struct mask_info *socket = &socket_info;
 	struct mask_info *book = &book_info;
 	union topology_entry *tle, *end;
 
@@ -135,11 +125,11 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
 			book->id = tle->container.id;
 			break;
 		case 1:
-			core = core->next;
-			core->id = tle->container.id;
+			socket = socket->next;
+			socket->id = tle->container.id;
 			break;
 		case 0:
-			add_cpus_to_mask(&tle->cpu, book, core, 0);
+			add_cpus_to_mask(&tle->cpu, book, socket, 0);
 			break;
 		default:
 			clear_masks();
@@ -149,9 +139,9 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
 	}
 }
 
-static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
+static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
 {
-	struct mask_info *core = &core_info;
+	struct mask_info *socket = &socket_info;
 	struct mask_info *book = &book_info;
 	union topology_entry *tle, *end;
 
@@ -164,7 +154,7 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
 			book->id = tle->container.id;
 			break;
 		case 0:
-			core = add_cpus_to_mask(&tle->cpu, book, core, 1);
+			socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
 			break;
 		default:
 			clear_masks();
@@ -174,20 +164,20 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
 	}
 }
 
-static void tl_to_cores(struct sysinfo_15_1_x *info)
+static void tl_to_masks(struct sysinfo_15_1_x *info)
 {
 	struct cpuid cpu_id;
 
-	get_cpu_id(&cpu_id);
 	spin_lock_irq(&topology_lock);
+	get_cpu_id(&cpu_id);
 	clear_masks();
 	switch (cpu_id.machine) {
 	case 0x2097:
 	case 0x2098:
-		__tl_to_cores_z10(info);
+		__tl_to_masks_z10(info);
 		break;
 	default:
-		__tl_to_cores_generic(info);
+		__tl_to_masks_generic(info);
 	}
 	spin_unlock_irq(&topology_lock);
 }
@@ -232,15 +222,20 @@ int topology_set_cpu_management(int fc)
 	return rc;
 }
 
-static void update_cpu_core_map(void)
+static void update_cpu_masks(void)
 {
 	unsigned long flags;
 	int cpu;
 
 	spin_lock_irqsave(&topology_lock, flags);
 	for_each_possible_cpu(cpu) {
-		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
-		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
+		cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
+		cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
+		if (!MACHINE_HAS_TOPOLOGY) {
+			cpu_topology[cpu].core_id = cpu;
+			cpu_topology[cpu].socket_id = cpu;
+			cpu_topology[cpu].book_id = cpu;
+		}
 	}
 	spin_unlock_irqrestore(&topology_lock, flags);
 }
@@ -260,13 +255,13 @@ int arch_update_cpu_topology(void)
 	int cpu;
 
 	if (!MACHINE_HAS_TOPOLOGY) {
-		update_cpu_core_map();
+		update_cpu_masks();
 		topology_update_polarization_simple();
 		return 0;
 	}
 	store_topology(info);
-	tl_to_cores(info);
-	update_cpu_core_map();
+	tl_to_masks(info);
+	update_cpu_masks();
 	for_each_online_cpu(cpu) {
 		dev = get_cpu_device(cpu);
 		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -355,7 +350,7 @@ void __init s390_init_cpu_topology(void)
 	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
 		printk(KERN_CONT " %d", info->mag[i]);
 	printk(KERN_CONT " / %d\n", info->mnest);
-	alloc_masks(info, &core_info, 1);
+	alloc_masks(info, &socket_info, 1);
 	alloc_masks(info, &book_info, 2);
 }
 
@@ -454,7 +449,7 @@ static int __init topology_init(void)
 	}
 	set_topology_timer();
 out:
-	update_cpu_core_map();
+	update_cpu_masks();
 	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
 }
 device_initcall(topology_init);
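One practical consequence of the update_cpu_masks() change above: on machines without topology information every logical cpu now reports itself as its own core, socket and book, so the ids exposed through the topology_* macros are distinct rather than all zero. A hedged sketch of a sanity check illustrating this (check_invented_ids() is hypothetical and only meaningful when MACHINE_HAS_TOPOLOGY is false):

#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

/* Hypothetical check: without machine-provided topology, the invented ids
 * simply mirror the logical cpu number. */
static void check_invented_ids(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		WARN_ON(topology_core_id(cpu) != cpu);
		WARN_ON(topology_physical_package_id(cpu) != cpu);
		WARN_ON(topology_book_id(cpu) != cpu);
	}
}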