Diffstat (limited to 'arch/s390/include/asm/spinlock.h')
-rw-r--r--  arch/s390/include/asm/spinlock.h | 98
 1 file changed, 53 insertions(+), 45 deletions(-)
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 83e5d216105e..b60212a02d08 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -14,15 +14,16 @@
 extern int spin_retry;
 
 static inline int
-_raw_compare_and_swap(volatile unsigned int *lock,
-		      unsigned int old, unsigned int new)
+_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
 {
+	unsigned int old_expected = old;
+
 	asm volatile(
 		"	cs	%0,%3,%1"
 		: "=d" (old), "=Q" (*lock)
 		: "0" (old), "d" (new), "Q" (*lock)
 		: "cc", "memory" );
-	return old;
+	return old == old_expected;
 }
 
 /*
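The first hunk changes the contract of _raw_compare_and_swap(): instead of returning the previous lock value and leaving the comparison to every caller, it now returns non-zero when the swap succeeded. A rough sketch of the new convention (illustration only, not part of the patch; the helper name and the GCC/Clang builtin are stand-ins for the inline CS assembly):

/*
 * Illustration only: the boolean return convention that the reworked
 * _raw_compare_and_swap() now exposes, expressed with a compiler builtin
 * instead of the s390 CS instruction.  Returns non-zero when the swap
 * succeeded, zero when the lock word had changed underneath us.
 */
static inline int cas_returns_success(unsigned int *lock,
				      unsigned int old, unsigned int new)
{
	/* __atomic_compare_exchange_n() already yields true/false. */
	return __atomic_compare_exchange_n(lock, &old, new, 0,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

With that, callers can simply test the result with if (!...) instead of comparing the return value against the expected old value, which is what the remaining hunks do.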
@@ -34,57 +35,66 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) \
-		 arch_spin_relax(lock); } while (0)
-
-extern void arch_spin_lock_wait(arch_spinlock_t *);
-extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
-extern int arch_spin_trylock_retry(arch_spinlock_t *);
-extern void arch_spin_relax(arch_spinlock_t *lock);
+void arch_spin_lock_wait(arch_spinlock_t *);
+int arch_spin_trylock_retry(arch_spinlock_t *);
+void arch_spin_relax(arch_spinlock_t *);
+void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
-	return lock.owner_cpu == 0;
+	return lock.lock == 0;
 }
 
-static inline void arch_spin_lock(arch_spinlock_t *lp)
+static inline int arch_spin_is_locked(arch_spinlock_t *lp)
+{
+	return ACCESS_ONCE(lp->lock) != 0;
+}
+
+static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 {
-	int old;
+	unsigned int new = ~smp_processor_id();
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return;
-	arch_spin_lock_wait(lp);
+	return _raw_compare_and_swap(&lp->lock, 0, new);
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
-					unsigned long flags)
+static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
 {
-	int old;
+	unsigned int old = ~smp_processor_id();
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return;
-	arch_spin_lock_wait_flags(lp, flags);
+	return _raw_compare_and_swap(&lp->lock, old, 0);
 }
 
-static inline int arch_spin_trylock(arch_spinlock_t *lp)
+static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
-	int old;
+	if (unlikely(!arch_spin_trylock_once(lp)))
+		arch_spin_lock_wait(lp);
+}
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return 1;
-	return arch_spin_trylock_retry(lp);
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
+					unsigned long flags)
+{
+	if (unlikely(!arch_spin_trylock_once(lp)))
+		arch_spin_lock_wait_flags(lp, flags);
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
+{
+	if (unlikely(!arch_spin_trylock_once(lp)))
+		return arch_spin_trylock_retry(lp);
+	return 1;
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
-	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
+	arch_spin_tryrelease_once(lp);
 }
 
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	while (arch_spin_is_locked(lock))
+		arch_spin_relax(lock);
+}
+
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
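The reworked spinlock fast path stores ~smp_processor_id() in the lock word while the lock is held, and arch_spin_tryrelease_once() only clears the word if it still carries the current CPU's tag. A stand-alone sketch of the encoding (illustration only; plain C, no kernel headers): CPU numbers start at 0, so storing the raw CPU id would make a lock held by CPU 0 indistinguishable from an unlocked lock, whereas the complement is always non-zero.

#include <stdio.h>

/* Illustration only: the lock-word encoding used by arch_spin_trylock_once().
 * 0 means unlocked; a held lock stores the bitwise complement of the owning
 * CPU number, which is non-zero even for CPU 0. */
int main(void)
{
	unsigned int cpu = 0;		/* hypothetical owning CPU */
	unsigned int held = ~cpu;	/* value the CS writes into the lock */

	printf("unlocked       = 0x%08x\n", 0u);		/* 0x00000000 */
	printf("held by CPU %u  = 0x%08x\n", cpu, held);	/* 0xffffffff */
	return 0;
}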
@@ -119,7 +129,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
-	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+	if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
 		_raw_read_lock_wait(rw);
 }
 
@@ -127,30 +137,28 @@ static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
-	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+	if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
 		_raw_read_lock_wait_flags(rw, flags);
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	unsigned int old, cmp;
+	unsigned int old;
 
-	old = rw->lock;
 	do {
-		cmp = old;
-		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
-	} while (cmp != old);
+		old = ACCESS_ONCE(rw->lock);
+	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
 }
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+	if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
 		_raw_write_lock_wait(rw);
 }
 
 static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+	if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
 		_raw_write_lock_wait_flags(rw, flags);
 }
 
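The new arch_read_unlock() re-reads the lock word on every iteration and retries the compare-and-swap until the decrement lands, instead of feeding the CS return value back into the next comparison. A portable sketch of the same loop shape (illustration only; ACCESS_ONCE and the CS helper replaced by compiler builtins, function name invented):

/* Illustration only: the retry-loop shape of the new arch_read_unlock(). */
static inline void read_unlock_sketch(unsigned int *lock)
{
	unsigned int old;

	do {
		/* fresh read each iteration, like ACCESS_ONCE(rw->lock) */
		old = __atomic_load_n(lock, __ATOMIC_RELAXED);
	} while (!__atomic_compare_exchange_n(lock, &old, old - 1, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
}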
@@ -163,14 +171,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
-	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
+	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1)))
 		return 1;
 	return _raw_read_trylock_retry(rw);
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
+	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
 		return 1;
 	return _raw_write_trylock_retry(rw);
 }
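For reference, the constants used in the rwlock paths imply a simple word layout: bit 31 (0x80000000) marks a writer, and the low 31 bits (masked with 0x7fffffffU) count the readers. A sketch of that layout with hypothetical macro and helper names (illustration only, not part of the header):

#define RWLOCK_WRITE_BIT	0x80000000U	/* set while a writer holds the lock */
#define RWLOCK_READER_MASK	0x7fffffffU	/* reader count in the low 31 bits */

/* Hypothetical helpers, for illustration only. */
static inline int rwlock_write_locked(unsigned int lock)
{
	return (lock & RWLOCK_WRITE_BIT) != 0;
}

static inline unsigned int rwlock_reader_count(unsigned int lock)
{
	return lock & RWLOCK_READER_MASK;
}

This is also why the read-lock fast path masks the writer bit off before attempting old -> old + 1: if a writer holds the lock, the unmasked word differs from the expected value, the CS fails, and the caller drops into the out-of-line wait routine.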