author	Philipp Hachtmann <phacht@linux.vnet.ibm.com>	2014-04-07 12:25:23 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-05-20 02:58:41 -0400
commit	5b3f683e694a835f5dfdab06102be1a50604c3b7 (patch)
tree	d9f1b030cd5d211c2a014194a288ad807f20308e
parent	50be634507284eea38df78154d22615d21200b42 (diff)
s390/spinlock: cleanup spinlock code
Improve the spinlock code in several aspects:
 - Have _raw_compare_and_swap return true if the operation has been
   successful instead of returning the old value.
 - Remove the "volatile" from arch_spinlock_t and arch_rwlock_t
 - Rename 'owner_cpu' to 'lock'
 - Add helper functions arch_spin_trylock_once / arch_spin_tryrelease_once

[ Martin Schwidefsky: patch breakdown and code beautification ]

Signed-off-by: Philipp Hachtmann <phacht@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--	arch/s390/include/asm/spinlock.h	98
-rw-r--r--	arch/s390/include/asm/spinlock_types.h	6
-rw-r--r--	arch/s390/lib/spinlock.c	55
3 files changed, 82 insertions(+), 77 deletions(-)
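A note on the central change before the diff: _raw_compare_and_swap now reports
success as a boolean instead of handing back the old lock value, so call sites
read as "if the CAS failed, take the slow path". As a rough user-space analogue
(illustration only; it uses the GCC/Clang builtin __atomic_compare_exchange_n
rather than the s390 CS instruction the patch relies on), the same convention
looks like this:

static inline int cas_bool(unsigned int *lock, unsigned int old,
			   unsigned int new)
{
	/* true iff *lock still contained 'old' and was replaced by 'new' */
	return __atomic_compare_exchange_n(lock, &old, new, 0,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

Callers can then test the result directly, which is exactly the shape the
converted call sites below have.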
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 83e5d216105e..b60212a02d08 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -14,15 +14,16 @@
 extern int spin_retry;
 
 static inline int
-_raw_compare_and_swap(volatile unsigned int *lock,
-		      unsigned int old, unsigned int new)
+_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
 {
+	unsigned int old_expected = old;
+
 	asm volatile(
 		"	cs	%0,%3,%1"
 		: "=d" (old), "=Q" (*lock)
 		: "0" (old), "d" (new), "Q" (*lock)
 		: "cc", "memory" );
-	return old;
+	return old == old_expected;
 }
 
 /*
@@ -34,57 +35,66 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) \
-		 arch_spin_relax(lock); } while (0)
-
-extern void arch_spin_lock_wait(arch_spinlock_t *);
-extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
-extern int arch_spin_trylock_retry(arch_spinlock_t *);
-extern void arch_spin_relax(arch_spinlock_t *lock);
+void arch_spin_lock_wait(arch_spinlock_t *);
+int arch_spin_trylock_retry(arch_spinlock_t *);
+void arch_spin_relax(arch_spinlock_t *);
+void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
-	return lock.owner_cpu == 0;
+	return lock.lock == 0;
 }
 
-static inline void arch_spin_lock(arch_spinlock_t *lp)
+static inline int arch_spin_is_locked(arch_spinlock_t *lp)
+{
+	return ACCESS_ONCE(lp->lock) != 0;
+}
+
+static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 {
-	int old;
+	unsigned int new = ~smp_processor_id();
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return;
-	arch_spin_lock_wait(lp);
+	return _raw_compare_and_swap(&lp->lock, 0, new);
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
-					unsigned long flags)
+static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
 {
-	int old;
+	unsigned int old = ~smp_processor_id();
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return;
-	arch_spin_lock_wait_flags(lp, flags);
+	return _raw_compare_and_swap(&lp->lock, old, 0);
 }
 
-static inline int arch_spin_trylock(arch_spinlock_t *lp)
+static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
-	int old;
+	if (unlikely(!arch_spin_trylock_once(lp)))
+		arch_spin_lock_wait(lp);
+}
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return 1;
-	return arch_spin_trylock_retry(lp);
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
+					unsigned long flags)
+{
+	if (unlikely(!arch_spin_trylock_once(lp)))
+		arch_spin_lock_wait_flags(lp, flags);
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
+{
+	if (unlikely(!arch_spin_trylock_once(lp)))
+		return arch_spin_trylock_retry(lp);
+	return 1;
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
-	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
+	arch_spin_tryrelease_once(lp);
 }
 
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	while (arch_spin_is_locked(lock))
+		arch_spin_relax(lock);
+}
+
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
@@ -119,7 +129,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
-	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+	if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
 		_raw_read_lock_wait(rw);
 }
 
@@ -127,30 +137,28 @@ static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
-	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+	if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
 		_raw_read_lock_wait_flags(rw, flags);
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	unsigned int old, cmp;
+	unsigned int old;
 
-	old = rw->lock;
 	do {
-		cmp = old;
-		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
-	} while (cmp != old);
+		old = ACCESS_ONCE(rw->lock);
+	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
 }
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+	if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
 		_raw_write_lock_wait(rw);
 }
 
 static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+	if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
 		_raw_write_lock_wait_flags(rw, flags);
 }
 
@@ -163,14 +171,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
-	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
+	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1)))
 		return 1;
 	return _raw_read_trylock_retry(rw);
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
+	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
 		return 1;
 	return _raw_write_trylock_retry(rw);
 }
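For reference on the ~smp_processor_id() value used by the new trylock and
tryrelease helpers above: the lock word stores the one's complement of the
owner's CPU number, so CPU 0 does not collide with the unlocked value 0, and
complementing again recovers the CPU number (which is what the ~owner
arguments to smp_yield_cpu() rely on). A standalone demo of the encoding, not
kernel code:

#include <stdio.h>

int main(void)
{
	for (unsigned int cpu = 0; cpu < 3; cpu++) {
		unsigned int lockval = ~cpu;	/* as in ~smp_processor_id() */
		printf("cpu %u -> lock word 0x%08x -> owner %u\n",
		       cpu, lockval, ~lockval);
	}
	return 0;
}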
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index 9c76656a0af0..b2cd6ff7c2c5 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -6,13 +6,13 @@
 #endif
 
 typedef struct {
-	volatile unsigned int owner_cpu;
+	unsigned int lock;
 } __attribute__ ((aligned (4))) arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
 
 typedef struct {
-	volatile unsigned int lock;
+	unsigned int lock;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
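Dropping "volatile" from the lock types works because the handful of reads
that must really hit memory each time are now funneled through ACCESS_ONCE
(see arch_spin_is_locked and arch_read_unlock above). A minimal sketch of the
idea behind that macro, essentially a volatile-qualified cast applied to a
single access rather than to every access of the field:

/* Sketch only: force a fresh load of x at this point without making
 * every other access to x volatile. */
#define ACCESS_ONCE_SKETCH(x) (*(volatile __typeof__(x) *)&(x))

This keeps wait loops such as while (arch_spin_is_locked(lock)) from being
collapsed by the compiler into a single cached read, while leaving all other
accesses free to be optimized normally.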
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index f709983f41f8..4a3b33b2dbb9 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -31,22 +31,21 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 	unsigned int owner;
 
 	while (1) {
-		owner = lp->owner_cpu;
+		owner = lp->lock;
 		if (!owner || smp_vcpu_scheduled(~owner)) {
 			for (count = spin_retry; count > 0; count--) {
 				if (arch_spin_is_locked(lp))
 					continue;
-				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
-							  cpu) == 0)
+				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 					return;
 			}
 			if (MACHINE_IS_LPAR)
 				continue;
 		}
-		owner = lp->owner_cpu;
+		owner = lp->lock;
 		if (owner)
 			smp_yield_cpu(~owner);
-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 			return;
 	}
 }
@@ -60,57 +59,55 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 
 	local_irq_restore(flags);
 	while (1) {
-		owner = lp->owner_cpu;
+		owner = lp->lock;
 		if (!owner || smp_vcpu_scheduled(~owner)) {
 			for (count = spin_retry; count > 0; count--) {
 				if (arch_spin_is_locked(lp))
 					continue;
 				local_irq_disable();
-				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
-							  cpu) == 0)
+				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 					return;
 				local_irq_restore(flags);
 			}
 			if (MACHINE_IS_LPAR)
 				continue;
 		}
-		owner = lp->owner_cpu;
+		owner = lp->lock;
 		if (owner)
 			smp_yield_cpu(~owner);
 		local_irq_disable();
-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 			return;
 		local_irq_restore(flags);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
+void arch_spin_relax(arch_spinlock_t *lp)
+{
+	unsigned int cpu = lp->lock;
+	if (cpu != 0) {
+		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
+		    !smp_vcpu_scheduled(~cpu))
+			smp_yield_cpu(~cpu);
+	}
+}
+EXPORT_SYMBOL(arch_spin_relax);
+
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
-	unsigned int cpu = ~smp_processor_id();
 	int count;
 
 	for (count = spin_retry; count > 0; count--) {
 		if (arch_spin_is_locked(lp))
 			continue;
-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+		if (arch_spin_trylock_once(lp))
 			return 1;
 	}
 	return 0;
 }
 EXPORT_SYMBOL(arch_spin_trylock_retry);
 
-void arch_spin_relax(arch_spinlock_t *lock)
-{
-	unsigned int cpu = lock->owner_cpu;
-	if (cpu != 0) {
-		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
-		    !smp_vcpu_scheduled(~cpu))
-			smp_yield_cpu(~cpu);
-	}
-}
-EXPORT_SYMBOL(arch_spin_relax);
-
 void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
 	unsigned int old;
@@ -124,7 +121,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
 			return;
 	}
 }
@@ -145,7 +142,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		local_irq_disable();
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
 			return;
 	}
 }
@@ -160,7 +157,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
 		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
 			return 1;
 	}
 	return 0;
@@ -178,7 +175,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 		}
 		if (!arch_write_can_lock(rw))
 			continue;
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
 			return;
 	}
 }
@@ -197,7 +194,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 		if (!arch_write_can_lock(rw))
 			continue;
 		local_irq_disable();
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
 			return;
 	}
 }
@@ -210,7 +207,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
 	while (count-- > 0) {
 		if (!arch_write_can_lock(rw))
 			continue;
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
 			return 1;
 	}
 	return 0;
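Taken together, the header changes give every lock operation the same shape:
one inline CAS attempt as the fast path, with the out-of-line retry/yield
machinery above as the slow path. A self-contained user-space analogue of that
structure (C11 atomics; an illustration of the pattern, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { atomic_uint lock; } demo_spinlock_t;

static bool demo_trylock_once(demo_spinlock_t *lp, unsigned int cpu)
{
	unsigned int unlocked = 0;

	/* Store ~cpu, mirroring ~smp_processor_id() in the patch. */
	return atomic_compare_exchange_strong(&lp->lock, &unlocked, ~cpu);
}

static void demo_lock(demo_spinlock_t *lp, unsigned int cpu)
{
	if (demo_trylock_once(lp, cpu))
		return;				/* fast path: a single CAS */
	while (!demo_trylock_once(lp, cpu))
		;				/* slow path: kernel spins/yields here */
}

static void demo_unlock(demo_spinlock_t *lp, unsigned int cpu)
{
	unsigned int mine = ~cpu;

	/* Release only if we are still the recorded owner, as
	 * arch_spin_tryrelease_once() does. */
	atomic_compare_exchange_strong(&lp->lock, &mine, 0);
}

int main(void)
{
	demo_spinlock_t lp = { .lock = 0 };

	demo_lock(&lp, 1);
	printf("locked, lock word = 0x%08x\n", atomic_load(&lp.lock));
	demo_unlock(&lp, 1);
	printf("unlocked, lock word = 0x%08x\n", atomic_load(&lp.lock));
	return 0;
}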