Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/topology.h |  2
-rw-r--r--  arch/s390/kernel/smp.c           |  2
-rw-r--r--  arch/s390/kernel/topology.c      | 34
3 files changed, 30 insertions, 8 deletions
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 7016dd7b6bc4..0837de80c351 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -35,11 +35,13 @@ int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
 void store_topology(struct sysinfo_15_1_x *info);
+void topology_expect_change(void);
 
 #else /* CONFIG_SCHED_BOOK */
 
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline void topology_expect_change(void) { }
 
 #endif /* CONFIG_SCHED_BOOK */
 
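Note: the header hunk adds a prototype for the new topology_expect_change() helper in the CONFIG_SCHED_BOOK case and an empty static inline stub otherwise, so callers elsewhere (for example in smp.c below) compile unchanged whichever way the option is configured.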
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 109e7422bb20..8aba77df68a9 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -867,6 +867,7 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
 			if (!rc) {
 				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
 				cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+				topology_expect_change();
 			}
 		}
 		break;
@@ -876,6 +877,7 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
 			if (!rc) {
 				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
 				cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+				topology_expect_change();
 			}
 		}
 		break;
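Note: both the deconfigure (CPU_STATE_STANDBY) and configure (CPU_STATE_CONFIGURED) paths now call topology_expect_change() right after resetting the polarization, so changing a CPU's configuration via sysfs temporarily raises the topology polling frequency implemented in topology.c below.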
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 2abad3014928..e06fb852d386 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -31,7 +31,6 @@ struct mask_info {
 static int topology_enabled = 1;
 static void topology_work_fn(struct work_struct *work);
 static struct sysinfo_15_1_x *tl_info;
-static struct timer_list topology_timer;
 static void set_topology_timer(void);
 static DECLARE_WORK(topology_work, topology_work_fn);
 /* topology_lock protects the core linked list */
@@ -297,12 +296,30 @@ static void topology_timer_fn(unsigned long ignored)
 	set_topology_timer();
 }
 
+static struct timer_list topology_timer =
+	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);
+
+static atomic_t topology_poll = ATOMIC_INIT(0);
+
 static void set_topology_timer(void)
 {
-	topology_timer.function = topology_timer_fn;
-	topology_timer.data = 0;
-	topology_timer.expires = jiffies + 60 * HZ;
-	add_timer(&topology_timer);
+	if (atomic_add_unless(&topology_poll, -1, 0))
+		mod_timer(&topology_timer, jiffies + HZ / 10);
+	else
+		mod_timer(&topology_timer, jiffies + HZ * 60);
+}
+
+void topology_expect_change(void)
+{
+	if (!MACHINE_HAS_TOPOLOGY)
+		return;
+	/* This is racy, but it doesn't matter since it is just a heuristic.
+	 * Worst case is that we poll in a higher frequency for a bit longer.
+	 */
+	if (atomic_read(&topology_poll) > 60)
+		return;
+	atomic_add(60, &topology_poll);
+	set_topology_timer();
 }
 
 static int __init early_parse_topology(char *p)
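Note: set_topology_timer() now implements a credit-based backoff. topology_expect_change() adds 60 "fast poll" credits unless more than 60 are already pending; each timer rearm consumes one credit via atomic_add_unless() and fires again after HZ / 10 (about 100 ms) while credits remain, falling back to the old 60-second interval once they are used up, i.e. roughly 60 x 100 ms, or about 6 s, of fast polling after an expected change. The following userspace sketch (illustrative only, not kernel code; the names poll_credits, consume_credit, next_interval_ms and expect_change are made up for the example) mimics that logic with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int poll_credits = 0;

/* Mimics atomic_add_unless(&topology_poll, -1, 0): decrement unless zero. */
static int consume_credit(void)
{
	int old = atomic_load(&poll_credits);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&poll_credits, &old, old - 1))
			return 1;	/* got a credit: poll fast */
	}
	return 0;			/* counter was zero: poll slowly */
}

static int next_interval_ms(void)
{
	/* Stands in for mod_timer() with HZ / 10 vs. HZ * 60. */
	return consume_credit() ? 100 : 60 * 1000;
}

static void expect_change(void)
{
	/* Cap the credits, like the kernel's check against topology_poll > 60. */
	if (atomic_load(&poll_credits) > 60)
		return;
	atomic_fetch_add(&poll_credits, 60);
}

int main(void)
{
	expect_change();
	/* The first 60 rearms come back fast, then the slow interval resumes. */
	for (int i = 0; i < 62; i++)
		printf("rearm %2d -> next poll in %d ms\n", i, next_interval_ms());
	return 0;
}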
@@ -379,8 +396,10 @@ static ssize_t dispatching_store(struct sysdev_class *dev,
 	if (cpu_management == val)
 		goto out;
 	rc = topology_set_cpu_management(val);
-	if (!rc)
-		cpu_management = val;
+	if (rc)
+		goto out;
+	cpu_management = val;
+	topology_expect_change();
 out:
 	mutex_unlock(&smp_cpu_state_mutex);
 	put_online_cpus();
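Note: dispatching_store() keeps its behaviour for cpu_management (it is still only updated when topology_set_cpu_management() succeeds, with rc returned to the caller otherwise), but a successful polarization switch now also calls topology_expect_change() to trigger the faster polling described above.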
@@ -438,7 +457,6 @@ static int __init topology_init(void)
 		topology_update_polarization_simple();
 		goto out;
 	}
-	init_timer_deferrable(&topology_timer);
 	set_topology_timer();
 out:
 	update_cpu_core_map();
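Note: with topology_timer now defined statically via TIMER_DEFERRED_INITIALIZER (moved below topology_timer_fn so the callback is in scope), the timer is fully set up at compile time and the runtime init_timer_deferrable() call in topology_init() can be dropped.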