about summary refs log tree commit diff stats
path: root/arch/s390/lib
diff options
context:
space:
mode:
author: Philipp Hachtmann <phacht@linux.vnet.ibm.com> 2014-04-07 12:25:23 -0400
committer: Martin Schwidefsky <schwidefsky@de.ibm.com> 2014-05-20 02:58:41 -0400
commit: 5b3f683e694a835f5dfdab06102be1a50604c3b7 (patch)
tree: d9f1b030cd5d211c2a014194a288ad807f20308e /arch/s390/lib
parent: 50be634507284eea38df78154d22615d21200b42 (diff)
s390/spinlock: cleanup spinlock code
Improve the spinlock code in several aspects:
- Have _raw_compare_and_swap return true if the operation has been
  successful instead of returning the old value.
- Remove the "volatile" from arch_spinlock_t and arch_rwlock_t
- Rename 'owner_cpu' to 'lock'
- Add helper functions arch_spin_trylock_once / arch_spin_tryrelease_once

[ Martin Schwidefsky: patch breakdown and code beautification ]

Signed-off-by: Philipp Hachtmann <phacht@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/lib')
-rw-r--r--  arch/s390/lib/spinlock.c | 55
1 file changed, 26 insertions(+), 29 deletions(-)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index f709983f41f8..4a3b33b2dbb9 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -31,22 +31,21 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
31 unsigned int owner; 31 unsigned int owner;
32 32
33 while (1) { 33 while (1) {
34 owner = lp->owner_cpu; 34 owner = lp->lock;
35 if (!owner || smp_vcpu_scheduled(~owner)) { 35 if (!owner || smp_vcpu_scheduled(~owner)) {
36 for (count = spin_retry; count > 0; count--) { 36 for (count = spin_retry; count > 0; count--) {
37 if (arch_spin_is_locked(lp)) 37 if (arch_spin_is_locked(lp))
38 continue; 38 continue;
39 if (_raw_compare_and_swap(&lp->owner_cpu, 0, 39 if (_raw_compare_and_swap(&lp->lock, 0, cpu))
40 cpu) == 0)
41 return; 40 return;
42 } 41 }
43 if (MACHINE_IS_LPAR) 42 if (MACHINE_IS_LPAR)
44 continue; 43 continue;
45 } 44 }
46 owner = lp->owner_cpu; 45 owner = lp->lock;
47 if (owner) 46 if (owner)
48 smp_yield_cpu(~owner); 47 smp_yield_cpu(~owner);
49 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 48 if (_raw_compare_and_swap(&lp->lock, 0, cpu))
50 return; 49 return;
51 } 50 }
52} 51}
@@ -60,57 +59,55 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
60 59
61 local_irq_restore(flags); 60 local_irq_restore(flags);
62 while (1) { 61 while (1) {
63 owner = lp->owner_cpu; 62 owner = lp->lock;
64 if (!owner || smp_vcpu_scheduled(~owner)) { 63 if (!owner || smp_vcpu_scheduled(~owner)) {
65 for (count = spin_retry; count > 0; count--) { 64 for (count = spin_retry; count > 0; count--) {
66 if (arch_spin_is_locked(lp)) 65 if (arch_spin_is_locked(lp))
67 continue; 66 continue;
68 local_irq_disable(); 67 local_irq_disable();
69 if (_raw_compare_and_swap(&lp->owner_cpu, 0, 68 if (_raw_compare_and_swap(&lp->lock, 0, cpu))
70 cpu) == 0)
71 return; 69 return;
72 local_irq_restore(flags); 70 local_irq_restore(flags);
73 } 71 }
74 if (MACHINE_IS_LPAR) 72 if (MACHINE_IS_LPAR)
75 continue; 73 continue;
76 } 74 }
77 owner = lp->owner_cpu; 75 owner = lp->lock;
78 if (owner) 76 if (owner)
79 smp_yield_cpu(~owner); 77 smp_yield_cpu(~owner);
80 local_irq_disable(); 78 local_irq_disable();
81 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 79 if (_raw_compare_and_swap(&lp->lock, 0, cpu))
82 return; 80 return;
83 local_irq_restore(flags); 81 local_irq_restore(flags);
84 } 82 }
85} 83}
86EXPORT_SYMBOL(arch_spin_lock_wait_flags); 84EXPORT_SYMBOL(arch_spin_lock_wait_flags);
87 85
86void arch_spin_relax(arch_spinlock_t *lp)
87{
88 unsigned int cpu = lp->lock;
89 if (cpu != 0) {
90 if (MACHINE_IS_VM || MACHINE_IS_KVM ||
91 !smp_vcpu_scheduled(~cpu))
92 smp_yield_cpu(~cpu);
93 }
94}
95EXPORT_SYMBOL(arch_spin_relax);
96
88int arch_spin_trylock_retry(arch_spinlock_t *lp) 97int arch_spin_trylock_retry(arch_spinlock_t *lp)
89{ 98{
90 unsigned int cpu = ~smp_processor_id();
91 int count; 99 int count;
92 100
93 for (count = spin_retry; count > 0; count--) { 101 for (count = spin_retry; count > 0; count--) {
94 if (arch_spin_is_locked(lp)) 102 if (arch_spin_is_locked(lp))
95 continue; 103 continue;
96 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 104 if (arch_spin_trylock_once(lp))
97 return 1; 105 return 1;
98 } 106 }
99 return 0; 107 return 0;
100} 108}
101EXPORT_SYMBOL(arch_spin_trylock_retry); 109EXPORT_SYMBOL(arch_spin_trylock_retry);
102 110
103void arch_spin_relax(arch_spinlock_t *lock)
104{
105 unsigned int cpu = lock->owner_cpu;
106 if (cpu != 0) {
107 if (MACHINE_IS_VM || MACHINE_IS_KVM ||
108 !smp_vcpu_scheduled(~cpu))
109 smp_yield_cpu(~cpu);
110 }
111}
112EXPORT_SYMBOL(arch_spin_relax);
113
114void _raw_read_lock_wait(arch_rwlock_t *rw) 111void _raw_read_lock_wait(arch_rwlock_t *rw)
115{ 112{
116 unsigned int old; 113 unsigned int old;
@@ -124,7 +121,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
124 if (!arch_read_can_lock(rw)) 121 if (!arch_read_can_lock(rw))
125 continue; 122 continue;
126 old = rw->lock & 0x7fffffffU; 123 old = rw->lock & 0x7fffffffU;
127 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) 124 if (_raw_compare_and_swap(&rw->lock, old, old + 1))
128 return; 125 return;
129 } 126 }
130} 127}
@@ -145,7 +142,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
145 continue; 142 continue;
146 old = rw->lock & 0x7fffffffU; 143 old = rw->lock & 0x7fffffffU;
147 local_irq_disable(); 144 local_irq_disable();
148 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) 145 if (_raw_compare_and_swap(&rw->lock, old, old + 1))
149 return; 146 return;
150 } 147 }
151} 148}
@@ -160,7 +157,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
160 if (!arch_read_can_lock(rw)) 157 if (!arch_read_can_lock(rw))
161 continue; 158 continue;
162 old = rw->lock & 0x7fffffffU; 159 old = rw->lock & 0x7fffffffU;
163 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) 160 if (_raw_compare_and_swap(&rw->lock, old, old + 1))
164 return 1; 161 return 1;
165 } 162 }
166 return 0; 163 return 0;
@@ -178,7 +175,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
178 } 175 }
179 if (!arch_write_can_lock(rw)) 176 if (!arch_write_can_lock(rw))
180 continue; 177 continue;
181 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) 178 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
182 return; 179 return;
183 } 180 }
184} 181}
@@ -197,7 +194,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
197 if (!arch_write_can_lock(rw)) 194 if (!arch_write_can_lock(rw))
198 continue; 195 continue;
199 local_irq_disable(); 196 local_irq_disable();
200 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) 197 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
201 return; 198 return;
202 } 199 }
203} 200}
@@ -210,7 +207,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
210 while (count-- > 0) { 207 while (count-- > 0) {
211 if (!arch_write_can_lock(rw)) 208 if (!arch_write_can_lock(rw))
212 continue; 209 continue;
213 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) 210 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
214 return 1; 211 return 1;
215 } 212 }
216 return 0; 213 return 0;