author		Gerald Schaefer <gerald.schaefer@de.ibm.com>	2014-05-06 13:41:36 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-05-20 02:58:52 -0400
commit		2e4006b34d06681ed95d55510d4450f29a13c417 (patch)
tree		0ff439559cfcfc2bac5eb1b41d6412694fd49a03
parent		f1a858206804a5a694f30196e50756b86eb7d68c (diff)
s390/spinlock: fix system hang with spin_retry <= 0
On LPAR, when spin_retry is set to <= 0, arch_spin_lock_wait() and
arch_spin_lock_wait_flags() may end up in a while(1) loop without doing
any compare-and-swap operation. To fix this, use a do/while loop instead
of a for loop, so that at least one compare-and-swap is attempted per
iteration even when spin_retry is <= 0.
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
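
For context, a minimal user-space sketch of the control-flow difference
(not the kernel code; the hypothetical try_lock() stands in for
_raw_compare_and_swap(), and the surrounding while(1)/LPAR logic is
omitted). With spin_retry <= 0 the for loop body never runs, so the lock
is never attempted; the do/while attempts it at least once:

#include <stdio.h>

static int spin_retry = 0;      /* retry count, set <= 0 */

static int try_lock(void)       /* stand-in for _raw_compare_and_swap() */
{
        return 1;               /* pretend the lock is free */
}

int main(void)
{
        int count;

        /* Old code: with spin_retry <= 0 the body never executes, so
         * no lock attempt is made; the kernel's outer while (1) loop
         * then spins forever on LPAR. */
        for (count = spin_retry; count > 0; count--)
                if (try_lock())
                        return puts("for: acquired"), 0;

        /* New code: the do/while body runs at least once, so a lock
         * attempt always happens. */
        count = spin_retry;
        do {
                if (try_lock())
                        return puts("do/while: acquired"), 0;
        } while (count-- > 0);

        return 1;
}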
-rw-r--r--	arch/s390/lib/spinlock.c | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 3ca9de4d9cb9..3f0e682b7e62 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -26,19 +26,20 @@ __setup("spin_retry=", spin_retry_setup);
 
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
-        int count = spin_retry;
         unsigned int cpu = SPINLOCK_LOCKVAL;
         unsigned int owner;
+        int count;
 
         while (1) {
                 owner = lp->lock;
                 if (!owner || smp_vcpu_scheduled(~owner)) {
-                        for (count = spin_retry; count > 0; count--) {
+                        count = spin_retry;
+                        do {
                                 if (arch_spin_is_locked(lp))
                                         continue;
                                 if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                         return;
-                        }
+                        } while (count-- > 0);
                         if (MACHINE_IS_LPAR)
                                 continue;
                 }
@@ -53,22 +54,23 @@ EXPORT_SYMBOL(arch_spin_lock_wait);
 
 void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
-        int count = spin_retry;
         unsigned int cpu = SPINLOCK_LOCKVAL;
         unsigned int owner;
+        int count;
 
         local_irq_restore(flags);
         while (1) {
                 owner = lp->lock;
                 if (!owner || smp_vcpu_scheduled(~owner)) {
-                        for (count = spin_retry; count > 0; count--) {
+                        count = spin_retry;
+                        do {
                                 if (arch_spin_is_locked(lp))
                                         continue;
                                 local_irq_disable();
                                 if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                         return;
-                                local_irq_restore(flags);
-                        }
+                                local_irq_restore(flags);
+                        } while (count-- > 0);
                         if (MACHINE_IS_LPAR)
                                 continue;
                 }