path: root/arch/s390/lib/spinlock.c
author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-05-16 09:11:12 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-05-20 02:58:55 -0400
commit	470ada6b1a1d80a173586c036f84e2c3a486ebf9 (patch)
tree	44fd6a61db1666e1c5239392b17b5c6f21ec40b0 /arch/s390/lib/spinlock.c
parent	939c5ae4029e1679bb93f7d09afb8c831db985bd (diff)
s390/spinlock: refactor arch_spin_lock_wait[_flags]
Reorder the spinlock wait code to make it more readable.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/lib/spinlock.c')
-rw-r--r--	arch/s390/lib/spinlock.c | 81
1 file changed, 47 insertions(+), 34 deletions(-)
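The refactored wait loops replace the old nested retry loop with four sequential steps per pass: try a compare-and-swap right away if the lock is free, yield to the owner's virtual CPU if it is not currently scheduled, otherwise spin on the lock value for up to spin_retry iterations, and finally yield again only when running under an extra hypervisor layer (i.e. not directly on LPAR). The following is a minimal, compilable C sketch of that ordering, not the kernel's code: try_cas(), cpu_is_running(), yield_to() and MACHINE_IS_LPAR_STUB are hypothetical stand-ins for _raw_compare_and_swap(), smp_vcpu_scheduled(), smp_yield_cpu() and MACHINE_IS_LPAR.

#include <sched.h>
#include <stdbool.h>

#define SPIN_RETRY 1000			/* stand-in for the kernel's spin_retry */
#define MACHINE_IS_LPAR_STUB 0		/* pretend we run nested, e.g. z/VM + LPAR */

/* Hypothetical stand-ins for the s390 primitives used in the patch. */
static bool try_cas(unsigned int *lock, unsigned int old, unsigned int new)
{
	return __sync_bool_compare_and_swap(lock, old, new); /* ~ _raw_compare_and_swap() */
}

static bool cpu_is_running(unsigned int cpu)	/* ~ smp_vcpu_scheduled() */
{
	(void)cpu;
	return true;	/* placeholder: assume the owner's vCPU is scheduled */
}

static void yield_to(unsigned int cpu)		/* ~ smp_yield_cpu() */
{
	(void)cpu;
	sched_yield();	/* placeholder: just give up the processor */
}

static void spin_lock_wait(unsigned int *lock, unsigned int lockval)
{
	unsigned int owner;
	int count;

	while (1) {
		owner = *(volatile unsigned int *)lock;	/* ~ ACCESS_ONCE(lp->lock) */
		/* 1. Lock is free: try to grab it right away. */
		if (!owner) {
			if (try_cas(lock, 0, lockval))
				return;
			continue;
		}
		/* 2. Owner's virtual CPU is not running: yield to it. */
		if (!cpu_is_running(~owner)) {
			yield_to(~owner);
			continue;
		}
		/* 3. Owner is running: spin on the lock value for a while. */
		count = SPIN_RETRY;
		do {
			owner = *(volatile unsigned int *)lock;
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/* 4. Still held: yield only when nested under another hypervisor. */
		if (!MACHINE_IS_LPAR_STUB)
			yield_to(~owner);
	}
}

The effect of the reordering is that each concern (free lock, preempted owner, bounded spin, hypervisor yield) now sits in its own block instead of being folded into one combined condition and an inner do/while.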
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 1dd282c742b5..5b0e445bc3f3 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -31,23 +31,31 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 	int count;
 
 	while (1) {
-		owner = lp->lock;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			count = spin_retry;
-			do {
-				if (arch_spin_is_locked(lp))
-					continue;
-				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-					return;
-			} while (count-- > 0);
-			if (MACHINE_IS_LPAR)
-				continue;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			continue;
 		}
-		owner = lp->lock;
-		if (owner)
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
 			smp_yield_cpu(~owner);
-		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-			return;
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
@@ -60,27 +68,32 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 
 	local_irq_restore(flags);
 	while (1) {
-		owner = lp->lock;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			count = spin_retry;
-			do {
-				if (arch_spin_is_locked(lp))
-					continue;
-				local_irq_disable();
-				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-					return;
-				local_irq_restore(flags);
-			} while (count-- > 0);
-			if (MACHINE_IS_LPAR)
-				continue;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			local_irq_disable();
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			local_irq_restore(flags);
 		}
-		owner = lp->lock;
-		if (owner)
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
 			smp_yield_cpu(~owner);
-		local_irq_disable();
-		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-			return;
-		local_irq_restore(flags);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
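Both hunks also switch the reads of lp->lock to ACCESS_ONCE() so the compiler reloads the lock word on every iteration instead of spinning on a cached register copy. As a rough illustration only (the kernel's own definition lives in include/linux/compiler.h in this tree), the idiom boils down to a volatile access:

/* Sketch of the ACCESS_ONCE() idiom; not copied from the kernel headers. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

/* Each pass of the "loop for a while on the lock value" step reloads the
 * lock word through the volatile cast, so a lock released by another CPU
 * is noticed even at high optimization levels. */
static unsigned int spin_on_lock(unsigned int *lock, int count)
{
	unsigned int owner;

	do {
		owner = ACCESS_ONCE(*lock);
	} while (owner && count-- > 0);
	return owner;
}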