Diffstat (limited to 'arch/s390/kernel/topology.c')
-rw-r--r--	arch/s390/kernel/topology.c	281
1 file changed, 190 insertions(+), 91 deletions(-)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index fdb5b8cb260f..7370a41948ca 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -1,22 +1,22 @@
 /*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007,2011
  * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
 #define KMSG_COMPONENT "cpu"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/device.h>
+#include <linux/workqueue.h>
 #include <linux/bootmem.h>
+#include <linux/cpuset.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/workqueue.h>
+#include <linux/init.h>
+#include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
-#include <linux/cpuset.h>
-#include <asm/delay.h>
+#include <linux/mm.h>
 
 #define PTF_HORIZONTAL	(0UL)
 #define PTF_VERTICAL	(1UL)
@@ -31,7 +31,6 @@ struct mask_info {
 static int topology_enabled = 1;
 static void topology_work_fn(struct work_struct *work);
 static struct sysinfo_15_1_x *tl_info;
-static struct timer_list topology_timer;
 static void set_topology_timer(void);
 static DECLARE_WORK(topology_work, topology_work_fn);
 /* topology_lock protects the core linked list */
@@ -41,11 +40,12 @@ static struct mask_info core_info;
 cpumask_t cpu_core_map[NR_CPUS];
 unsigned char cpu_core_id[NR_CPUS];
 
-#ifdef CONFIG_SCHED_BOOK
 static struct mask_info book_info;
 cpumask_t cpu_book_map[NR_CPUS];
 unsigned char cpu_book_id[NR_CPUS];
-#endif
+
+/* smp_cpu_state_mutex must be held when accessing this array */
+int cpu_polarization[NR_CPUS];
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -71,7 +71,7 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
					  struct mask_info *book,
					  struct mask_info *core,
-					  int z10)
+					  int one_core_per_cpu)
 {
 	unsigned int cpu;
 
@@ -85,18 +85,16 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
 		for_each_present_cpu(lcpu) {
 			if (cpu_logical_map(lcpu) != rcpu)
 				continue;
-#ifdef CONFIG_SCHED_BOOK
 			cpumask_set_cpu(lcpu, &book->mask);
 			cpu_book_id[lcpu] = book->id;
-#endif
 			cpumask_set_cpu(lcpu, &core->mask);
-			if (z10) {
+			if (one_core_per_cpu) {
 				cpu_core_id[lcpu] = rcpu;
 				core = core->next;
 			} else {
 				cpu_core_id[lcpu] = core->id;
 			}
-			smp_cpu_polarization[lcpu] = tl_cpu->pp;
+			cpu_set_polarization(lcpu, tl_cpu->pp);
 		}
 	}
 	return core;
@@ -111,13 +109,11 @@ static void clear_masks(void)
 		cpumask_clear(&info->mask);
 		info = info->next;
 	}
-#ifdef CONFIG_SCHED_BOOK
 	info = &book_info;
 	while (info) {
 		cpumask_clear(&info->mask);
 		info = info->next;
 	}
-#endif
 }
 
 static union topology_entry *next_tle(union topology_entry *tle)
@@ -127,66 +123,75 @@ static union topology_entry *next_tle(union topology_entry *tle)
 	return (union topology_entry *)((struct topology_container *)tle + 1);
 }
 
-static void tl_to_cores(struct sysinfo_15_1_x *info)
+static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
 {
-#ifdef CONFIG_SCHED_BOOK
-	struct mask_info *book = &book_info;
-	struct cpuid cpu_id;
-#else
-	struct mask_info *book = NULL;
-#endif
 	struct mask_info *core = &core_info;
+	struct mask_info *book = &book_info;
 	union topology_entry *tle, *end;
-	int z10 = 0;
 
-#ifdef CONFIG_SCHED_BOOK
-	get_cpu_id(&cpu_id);
-	z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
-#endif
-	spin_lock_irq(&topology_lock);
-	clear_masks();
 	tle = info->tle;
 	end = (union topology_entry *)((unsigned long)info + info->length);
 	while (tle < end) {
-#ifdef CONFIG_SCHED_BOOK
-		if (z10) {
-			switch (tle->nl) {
-			case 1:
-				book = book->next;
-				book->id = tle->container.id;
-				break;
-			case 0:
-				core = add_cpus_to_mask(&tle->cpu, book, core, z10);
-				break;
-			default:
-				clear_masks();
-				goto out;
-			}
-			tle = next_tle(tle);
-			continue;
-		}
-#endif
 		switch (tle->nl) {
-#ifdef CONFIG_SCHED_BOOK
 		case 2:
 			book = book->next;
 			book->id = tle->container.id;
 			break;
-#endif
 		case 1:
 			core = core->next;
 			core->id = tle->container.id;
 			break;
 		case 0:
-			add_cpus_to_mask(&tle->cpu, book, core, z10);
+			add_cpus_to_mask(&tle->cpu, book, core, 0);
 			break;
 		default:
 			clear_masks();
-			goto out;
+			return;
 		}
 		tle = next_tle(tle);
 	}
-out:
+}
+
+static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
+{
+	struct mask_info *core = &core_info;
+	struct mask_info *book = &book_info;
+	union topology_entry *tle, *end;
+
+	tle = info->tle;
+	end = (union topology_entry *)((unsigned long)info + info->length);
+	while (tle < end) {
+		switch (tle->nl) {
+		case 1:
+			book = book->next;
+			book->id = tle->container.id;
+			break;
+		case 0:
+			core = add_cpus_to_mask(&tle->cpu, book, core, 1);
+			break;
+		default:
+			clear_masks();
+			return;
+		}
+		tle = next_tle(tle);
+	}
+}
+
+static void tl_to_cores(struct sysinfo_15_1_x *info)
+{
+	struct cpuid cpu_id;
+
+	get_cpu_id(&cpu_id);
+	spin_lock_irq(&topology_lock);
+	clear_masks();
+	switch (cpu_id.machine) {
+	case 0x2097:
+	case 0x2098:
+		__tl_to_cores_z10(info);
+		break;
+	default:
+		__tl_to_cores_generic(info);
+	}
 	spin_unlock_irq(&topology_lock);
 }
 
@@ -196,7 +201,7 @@ static void topology_update_polarization_simple(void)
 
 	mutex_lock(&smp_cpu_state_mutex);
 	for_each_possible_cpu(cpu)
-		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
+		cpu_set_polarization(cpu, POLARIZATION_HRZ);
 	mutex_unlock(&smp_cpu_state_mutex);
 }
 
@@ -215,8 +220,7 @@ static int ptf(unsigned long fc)
 
 int topology_set_cpu_management(int fc)
 {
-	int cpu;
-	int rc;
+	int cpu, rc;
 
 	if (!MACHINE_HAS_TOPOLOGY)
 		return -EOPNOTSUPP;
@@ -227,7 +231,7 @@ int topology_set_cpu_management(int fc)
 	if (rc)
 		return -EBUSY;
 	for_each_possible_cpu(cpu)
-		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
 	return rc;
 }
 
@@ -239,29 +243,25 @@ static void update_cpu_core_map(void)
 	spin_lock_irqsave(&topology_lock, flags);
 	for_each_possible_cpu(cpu) {
 		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
-#ifdef CONFIG_SCHED_BOOK
 		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
-#endif
 	}
 	spin_unlock_irqrestore(&topology_lock, flags);
 }
 
 void store_topology(struct sysinfo_15_1_x *info)
 {
-#ifdef CONFIG_SCHED_BOOK
 	int rc;
 
 	rc = stsi(info, 15, 1, 3);
 	if (rc != -ENOSYS)
 		return;
-#endif
 	stsi(info, 15, 1, 2);
 }
 
 int arch_update_cpu_topology(void)
 {
 	struct sysinfo_15_1_x *info = tl_info;
-	struct sys_device *sysdev;
+	struct device *dev;
 	int cpu;
 
 	if (!MACHINE_HAS_TOPOLOGY) {
@@ -273,8 +273,8 @@ int arch_update_cpu_topology(void)
 	tl_to_cores(info);
 	update_cpu_core_map();
 	for_each_online_cpu(cpu) {
-		sysdev = get_cpu_sysdev(cpu);
-		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+		dev = get_cpu_device(cpu);
+		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
 	}
 	return 1;
 }
@@ -296,12 +296,30 @@ static void topology_timer_fn(unsigned long ignored)
 	set_topology_timer();
 }
 
+static struct timer_list topology_timer =
+	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);
+
+static atomic_t topology_poll = ATOMIC_INIT(0);
+
 static void set_topology_timer(void)
 {
-	topology_timer.function = topology_timer_fn;
-	topology_timer.data = 0;
-	topology_timer.expires = jiffies + 60 * HZ;
-	add_timer(&topology_timer);
+	if (atomic_add_unless(&topology_poll, -1, 0))
+		mod_timer(&topology_timer, jiffies + HZ / 10);
+	else
+		mod_timer(&topology_timer, jiffies + HZ * 60);
+}
+
+void topology_expect_change(void)
+{
+	if (!MACHINE_HAS_TOPOLOGY)
+		return;
+	/* This is racy, but it doesn't matter since it is just a heuristic.
+	 * Worst case is that we poll in a higher frequency for a bit longer.
+	 */
+	if (atomic_read(&topology_poll) > 60)
+		return;
+	atomic_add(60, &topology_poll);
+	set_topology_timer();
 }
 
 static int __init early_parse_topology(char *p)
@@ -313,23 +331,6 @@ static int __init early_parse_topology(char *p)
 }
 early_param("topology", early_parse_topology);
 
-static int __init init_topology_update(void)
-{
-	int rc;
-
-	rc = 0;
-	if (!MACHINE_HAS_TOPOLOGY) {
-		topology_update_polarization_simple();
-		goto out;
-	}
-	init_timer_deferrable(&topology_timer);
-	set_topology_timer();
-out:
-	update_cpu_core_map();
-	return rc;
-}
-__initcall(init_topology_update);
-
 static void __init alloc_masks(struct sysinfo_15_1_x *info,
 			       struct mask_info *mask, int offset)
 {
@@ -357,10 +358,108 @@ void __init s390_init_cpu_topology(void)
 	store_topology(info);
 	pr_info("The CPU configuration topology of the machine is:");
 	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
-		printk(" %d", info->mag[i]);
-	printk(" / %d\n", info->mnest);
+		printk(KERN_CONT " %d", info->mag[i]);
+	printk(KERN_CONT " / %d\n", info->mnest);
 	alloc_masks(info, &core_info, 1);
-#ifdef CONFIG_SCHED_BOOK
 	alloc_masks(info, &book_info, 2);
-#endif
 }
+
+static int cpu_management;
+
+static ssize_t dispatching_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	count = sprintf(buf, "%d\n", cpu_management);
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+
+static ssize_t dispatching_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t count)
+{
+	int val, rc;
+	char delim;
+
+	if (sscanf(buf, "%d %c", &val, &delim) != 1)
+		return -EINVAL;
+	if (val != 0 && val != 1)
+		return -EINVAL;
+	rc = 0;
+	get_online_cpus();
+	mutex_lock(&smp_cpu_state_mutex);
+	if (cpu_management == val)
+		goto out;
+	rc = topology_set_cpu_management(val);
+	if (rc)
+		goto out;
+	cpu_management = val;
+	topology_expect_change();
+out:
+	mutex_unlock(&smp_cpu_state_mutex);
+	put_online_cpus();
+	return rc ? rc : count;
+}
+static DEVICE_ATTR(dispatching, 0644, dispatching_show,
+		   dispatching_store);
+
+static ssize_t cpu_polarization_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	int cpu = dev->id;
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	switch (cpu_read_polarization(cpu)) {
+	case POLARIZATION_HRZ:
+		count = sprintf(buf, "horizontal\n");
+		break;
+	case POLARIZATION_VL:
+		count = sprintf(buf, "vertical:low\n");
+		break;
+	case POLARIZATION_VM:
+		count = sprintf(buf, "vertical:medium\n");
+		break;
+	case POLARIZATION_VH:
+		count = sprintf(buf, "vertical:high\n");
+		break;
+	default:
+		count = sprintf(buf, "unknown\n");
+		break;
+	}
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);
+
+static struct attribute *topology_cpu_attrs[] = {
+	&dev_attr_polarization.attr,
+	NULL,
+};
+
+static struct attribute_group topology_cpu_attr_group = {
+	.attrs = topology_cpu_attrs,
+};
+
+int topology_cpu_init(struct cpu *cpu)
+{
+	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+}
+
+static int __init topology_init(void)
+{
+	if (!MACHINE_HAS_TOPOLOGY) {
+		topology_update_polarization_simple();
+		goto out;
+	}
+	set_topology_timer();
+out:
+	update_cpu_core_map();
+	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
+}
+device_initcall(topology_init);