author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-09-19 08:29:31 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-09-25 04:52:05 -0400
commit		d59b93da5e572703e1a7311c13dd3472a4e56e30 (patch)
tree		4c75073780f54bc9785433256eee3d6d70eaf868 /arch/s390/lib/spinlock.c
parent		46b05c7bd51edafb8c8da088b49bddf7f78d48f9 (diff)
s390/rwlock: use directed yield for write-locked rwlocks
Add an owner field to the arch_rwlock_t so that the timeslice of a virtual
CPU can be passed with diagnose 0x9c to the lock owner when the rwlock is
write-locked. The undirected yield that was done while trying to acquire
the rwlock for writing when the lock is read-locked is removed.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
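
For reference, a minimal sketch of the rwlock layout this patch relies on. The field names come from the patch itself; the comments on the encoding (a complemented CPU number, with 0 meaning "no writer") are inferred from the way the wait loops test owner and call smp_yield_cpu(~owner), not quoted from the s390 headers:

	typedef struct {
		unsigned int lock;	/* sign bit set: write-locked, else reader count */
		unsigned int owner;	/* ~(CPU number) of the writer, 0 if none */
	} arch_rwlock_t;

The wait loops below refresh owner on every spin iteration, so when the retry budget runs out the waiter holds a recent hint about which virtual CPU should receive its timeslice.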
Diffstat (limited to 'arch/s390/lib/spinlock.c')
-rw-r--r--	arch/s390/lib/spinlock.c	49
1 file changed, 30 insertions, 19 deletions
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 5b0e445bc3f3..5f63ac5783cb 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -98,17 +98,6 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
-void arch_spin_relax(arch_spinlock_t *lp)
-{
-	unsigned int cpu = lp->lock;
-	if (cpu != 0) {
-		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
-		    !smp_vcpu_scheduled(~cpu))
-			smp_yield_cpu(~cpu);
-	}
-}
-EXPORT_SYMBOL(arch_spin_relax);
-
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
 	int count;
@@ -122,15 +111,18 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
 
 void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if ((int) old < 0)
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
@@ -141,16 +133,19 @@ EXPORT_SYMBOL(_raw_read_lock_wait);
 
 void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
 	local_irq_restore(flags);
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if ((int) old < 0)
 			continue;
 		local_irq_disable();
@@ -179,15 +174,18 @@ EXPORT_SYMBOL(_raw_read_trylock_retry);
 
 void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if (old)
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
@@ -198,16 +196,19 @@ EXPORT_SYMBOL(_raw_write_lock_wait);
 
 void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
 	local_irq_restore(flags);
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if (old)
 			continue;
 		local_irq_disable();
@@ -233,3 +234,13 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
 	return 0;
 }
 EXPORT_SYMBOL(_raw_write_trylock_retry);
+
+void arch_lock_relax(unsigned int cpu)
+{
+	if (!cpu)
+		return;
+	if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
+		return;
+	smp_yield_cpu(~cpu);
+}
+EXPORT_SYMBOL(arch_lock_relax);
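
The new arch_lock_relax() yields only when the stored owner word is nonzero and, on LPAR, only when the owning virtual CPU is not currently running. This works because the lock and owner words store CPU numbers in one's-complement form, which keeps every valid CPU number nonzero and leaves 0 free to mean "unowned". A standalone sketch of that encoding (illustrative only, not kernel code):

	#include <stdio.h>

	/* Illustrative only: the complemented-CPU encoding used by the lock
	 * and owner words.  Storing ~cpu keeps every valid CPU id nonzero,
	 * so a plain zero test ("if (!cpu) return;") detects the unowned
	 * case, and ~stored recovers the original CPU number. */
	int main(void)
	{
		unsigned int cpu = 0;		/* worst case: CPU 0 */
		unsigned int stored = ~cpu;	/* encodes to 0xffffffff, nonzero */

		printf("cpu %u -> stored %#x -> decoded %u\n",
		       cpu, stored, ~stored);
		return 0;
	}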