diff options
author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2011-12-27 05:27:16 -0500 |
---|---|---|
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2011-12-27 05:27:12 -0500 |
commit | d68bddb7329a4d47d950d6b0745a7e274d230ed4 (patch) | |
tree | c3916656ddad9c86f58945f2b4b4cfac5e741f04 /arch/s390/kernel | |
parent | c5328901aa1db134325607d65527742d8be07f7d (diff) |
[S390] topology: increase poll frequency if change is anticipated
Increase cpu topology change poll frequency if a change is anticipated.
Otherwise a user might be a bit confused to have to wait up to a minute
in order to see a change that should be visible immediately.
However there is no guarantee that the change will happen during the
time frame the poll frequency is increased.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r-- | arch/s390/kernel/smp.c | 2 | ||||
-rw-r--r-- | arch/s390/kernel/topology.c | 34 |
2 files changed, 28 insertions, 8 deletions
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 109e7422bb20..8aba77df68a9 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -867,6 +867,7 @@ static ssize_t cpu_configure_store(struct sys_device *dev, | |||
867 | if (!rc) { | 867 | if (!rc) { |
868 | smp_cpu_state[cpu] = CPU_STATE_STANDBY; | 868 | smp_cpu_state[cpu] = CPU_STATE_STANDBY; |
869 | cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); | 869 | cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); |
870 | topology_expect_change(); | ||
870 | } | 871 | } |
871 | } | 872 | } |
872 | break; | 873 | break; |
@@ -876,6 +877,7 @@ static ssize_t cpu_configure_store(struct sys_device *dev, | |||
876 | if (!rc) { | 877 | if (!rc) { |
877 | smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; | 878 | smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; |
878 | cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); | 879 | cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); |
880 | topology_expect_change(); | ||
879 | } | 881 | } |
880 | } | 882 | } |
881 | break; | 883 | break; |
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 2abad3014928..e06fb852d386 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
@@ -31,7 +31,6 @@ struct mask_info { | |||
31 | static int topology_enabled = 1; | 31 | static int topology_enabled = 1; |
32 | static void topology_work_fn(struct work_struct *work); | 32 | static void topology_work_fn(struct work_struct *work); |
33 | static struct sysinfo_15_1_x *tl_info; | 33 | static struct sysinfo_15_1_x *tl_info; |
34 | static struct timer_list topology_timer; | ||
35 | static void set_topology_timer(void); | 34 | static void set_topology_timer(void); |
36 | static DECLARE_WORK(topology_work, topology_work_fn); | 35 | static DECLARE_WORK(topology_work, topology_work_fn); |
37 | /* topology_lock protects the core linked list */ | 36 | /* topology_lock protects the core linked list */ |
@@ -297,12 +296,30 @@ static void topology_timer_fn(unsigned long ignored) | |||
297 | set_topology_timer(); | 296 | set_topology_timer(); |
298 | } | 297 | } |
299 | 298 | ||
299 | static struct timer_list topology_timer = | ||
300 | TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0); | ||
301 | |||
302 | static atomic_t topology_poll = ATOMIC_INIT(0); | ||
303 | |||
300 | static void set_topology_timer(void) | 304 | static void set_topology_timer(void) |
301 | { | 305 | { |
302 | topology_timer.function = topology_timer_fn; | 306 | if (atomic_add_unless(&topology_poll, -1, 0)) |
303 | topology_timer.data = 0; | 307 | mod_timer(&topology_timer, jiffies + HZ / 10); |
304 | topology_timer.expires = jiffies + 60 * HZ; | 308 | else |
305 | add_timer(&topology_timer); | 309 | mod_timer(&topology_timer, jiffies + HZ * 60); |
310 | } | ||
311 | |||
312 | void topology_expect_change(void) | ||
313 | { | ||
314 | if (!MACHINE_HAS_TOPOLOGY) | ||
315 | return; | ||
316 | /* This is racy, but it doesn't matter since it is just a heuristic. | ||
317 | * Worst case is that we poll in a higher frequency for a bit longer. | ||
318 | */ | ||
319 | if (atomic_read(&topology_poll) > 60) | ||
320 | return; | ||
321 | atomic_add(60, &topology_poll); | ||
322 | set_topology_timer(); | ||
306 | } | 323 | } |
307 | 324 | ||
308 | static int __init early_parse_topology(char *p) | 325 | static int __init early_parse_topology(char *p) |
@@ -379,8 +396,10 @@ static ssize_t dispatching_store(struct sysdev_class *dev, | |||
379 | if (cpu_management == val) | 396 | if (cpu_management == val) |
380 | goto out; | 397 | goto out; |
381 | rc = topology_set_cpu_management(val); | 398 | rc = topology_set_cpu_management(val); |
382 | if (!rc) | 399 | if (rc) |
383 | cpu_management = val; | 400 | goto out; |
401 | cpu_management = val; | ||
402 | topology_expect_change(); | ||
384 | out: | 403 | out: |
385 | mutex_unlock(&smp_cpu_state_mutex); | 404 | mutex_unlock(&smp_cpu_state_mutex); |
386 | put_online_cpus(); | 405 | put_online_cpus(); |
@@ -438,7 +457,6 @@ static int __init topology_init(void) | |||
438 | topology_update_polarization_simple(); | 457 | topology_update_polarization_simple(); |
439 | goto out; | 458 | goto out; |
440 | } | 459 | } |
441 | init_timer_deferrable(&topology_timer); | ||
442 | set_topology_timer(); | 460 | set_topology_timer(); |
443 | out: | 461 | out: |
444 | update_cpu_core_map(); | 462 | update_cpu_core_map(); |