author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-09-19 08:29:31 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-09-25 04:52:05 -0400
commit	d59b93da5e572703e1a7311c13dd3472a4e56e30 (patch)
tree	4c75073780f54bc9785433256eee3d6d70eaf868 /arch/s390
parent	46b05c7bd51edafb8c8da088b49bddf7f78d48f9 (diff)
s390/rwlock: use directed yield for write-locked rwlocks
Add an owner field to arch_rwlock_t so that, when the rwlock is write-locked, the timeslice of a spinning virtual CPU can be passed to the lock owner with diagnose 0x9c. The undirected yield that was done when the rwlock is wanted for writing but is currently read-locked is removed; with multiple readers holding the lock there is no single owner to yield to.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
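A minimal user-space sketch of the directed-yield pattern may make the change easier to follow. The loop mirrors the new _raw_write_lock_wait() below, but struct rwlock, the SPIN_RETRY budget, and the two smp_* stubs are invented stand-ins for the kernel and hypervisor interfaces, not real APIs; on s390 the real smp_yield_cpu() issues diagnose 0x9c to the hypervisor.

/*
 * Hypothetical stand-alone model of the directed-yield pattern;
 * all names below are illustrative, not the kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

#define SPIN_RETRY 1000

struct rwlock {
	atomic_uint lock;	/* 0x80000000 when write-locked, else reader count */
	atomic_uint owner;	/* ~cpu of the writing CPU, 0 when there is none */
};

/* Stub: would ask the hypervisor whether the vCPU is currently
 * backed by a physical CPU. */
static int smp_vcpu_scheduled(int cpu) { (void)cpu; return 0; }

/* Stub: would donate the caller's timeslice to the given CPU
 * (diagnose 0x9c on s390). */
static void smp_yield_cpu(int cpu)
{
	printf("directed yield to cpu %d\n", cpu);
}

static void write_lock_wait(struct rwlock *rw, unsigned int lockval)
{
	unsigned int owner = 0, old, zero;
	int count = SPIN_RETRY;

	for (;;) {
		if (count-- <= 0) {
			/* Retry budget exhausted: yield to the current
			 * writer, but only if it is not running anyway. */
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = SPIN_RETRY;
		}
		old = atomic_load(&rw->lock);
		owner = atomic_load(&rw->owner);
		if (old)	/* held by readers or another writer */
			continue;
		zero = 0;
		if (atomic_compare_exchange_strong(&rw->lock, &zero,
						   0x80000000u)) {
			/* Announce ownership so that waiters can direct
			 * their yields at us. */
			atomic_store(&rw->owner, lockval);
			return;
		}
	}
}

int main(void)
{
	struct rwlock rw = { 0, 0 };

	write_lock_wait(&rw, ~1u);	/* take the write lock as "cpu 1" */
	atomic_store(&rw.owner, 0);	/* unlock: clear the owner first... */
	atomic_store(&rw.lock, 0);	/* ...then release the lock word */
	return 0;
}

Note the unlock order in the patch itself: arch_write_unlock() clears rw->owner before releasing rw->lock, so a stale owner never outlives the write lock, and since waiters re-read both fields on every iteration, a transiently stale owner costs at most one misdirected yield.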
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/smp.h             |  2
-rw-r--r--  arch/s390/include/asm/spinlock.h        | 28
-rw-r--r--  arch/s390/include/asm/spinlock_types.h  |  1
-rw-r--r--  arch/s390/kernel/smp.c                  |  6
-rw-r--r--  arch/s390/lib/spinlock.c                | 49
5 files changed, 54 insertions(+), 32 deletions(-)
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 4f1307962a95..762d4f88af5a 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -29,7 +29,6 @@ extern int smp_find_processor_id(u16 address);
 extern int smp_store_status(int cpu);
 extern int smp_vcpu_scheduled(int cpu);
 extern void smp_yield_cpu(int cpu);
-extern void smp_yield(void);
 extern void smp_cpu_set_polarization(int cpu, int val);
 extern int smp_cpu_get_polarization(int cpu);
 extern void smp_fill_possible_mask(void);
@@ -50,7 +49,6 @@ static inline int smp_find_processor_id(u16 address) { return 0; }
 static inline int smp_store_status(int cpu) { return 0; }
 static inline int smp_vcpu_scheduled(int cpu) { return 1; }
 static inline void smp_yield_cpu(int cpu) { }
-static inline void smp_yield(void) { }
 static inline void smp_fill_possible_mask(void) { }
 
 #endif /* CONFIG_SMP */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index d26ad2ac7cb2..e98654163e5c 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -37,11 +37,17 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
  * (the type definitions are in asm/spinlock_types.h)
  */
 
+void arch_lock_relax(unsigned int cpu);
+
 void arch_spin_lock_wait(arch_spinlock_t *);
 int arch_spin_trylock_retry(arch_spinlock_t *);
-void arch_spin_relax(arch_spinlock_t *);
 void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 
+static inline void arch_spin_relax(arch_spinlock_t *lock)
+{
+	arch_lock_relax(lock->lock);
+}
+
 static inline u32 arch_spin_lockval(int cpu)
 {
 	return ~cpu;
@@ -170,17 +176,21 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	if (!arch_write_trylock_once(rw))
 		_raw_write_lock_wait(rw);
+	rw->owner = SPINLOCK_LOCKVAL;
 }
 
 static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	if (!arch_write_trylock_once(rw))
 		_raw_write_lock_wait_flags(rw, flags);
+	rw->owner = SPINLOCK_LOCKVAL;
 }
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	typecheck(unsigned int, rw->lock);
+
+	rw->owner = 0;
 	asm volatile(
 		__ASM_BARRIER
 		"st %1,%0\n"
@@ -198,12 +208,20 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	if (!arch_write_trylock_once(rw))
-		return _raw_write_trylock_retry(rw);
+	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
+		return 0;
+	rw->owner = SPINLOCK_LOCKVAL;
 	return 1;
 }
 
-#define arch_read_relax(lock)	cpu_relax()
-#define arch_write_relax(lock)	cpu_relax()
+static inline void arch_read_relax(arch_rwlock_t *rw)
+{
+	arch_lock_relax(rw->owner);
+}
+
+static inline void arch_write_relax(arch_rwlock_t *rw)
+{
+	arch_lock_relax(rw->owner);
+}
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index b2cd6ff7c2c5..d84b6939237c 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -13,6 +13,7 @@ typedef struct {
 
 typedef struct {
 	unsigned int lock;
+	unsigned int owner;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 243c7e512600..abec97b4ddbf 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -333,12 +333,6 @@ int smp_vcpu_scheduled(int cpu)
 	return pcpu_running(pcpu_devices + cpu);
 }
 
-void smp_yield(void)
-{
-	if (MACHINE_HAS_DIAG44)
-		asm volatile("diag 0,0,0x44");
-}
-
 void smp_yield_cpu(int cpu)
 {
 	if (MACHINE_HAS_DIAG9C)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 5b0e445bc3f3..5f63ac5783cb 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -98,17 +98,6 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
-void arch_spin_relax(arch_spinlock_t *lp)
-{
-	unsigned int cpu = lp->lock;
-	if (cpu != 0) {
-		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
-		    !smp_vcpu_scheduled(~cpu))
-			smp_yield_cpu(~cpu);
-	}
-}
-EXPORT_SYMBOL(arch_spin_relax);
-
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
 	int count;
@@ -122,15 +111,18 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
 
 void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if ((int) old < 0)
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
@@ -141,16 +133,19 @@ EXPORT_SYMBOL(_raw_read_lock_wait);
 
 void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
 	local_irq_restore(flags);
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if ((int) old < 0)
 			continue;
 		local_irq_disable();
@@ -179,15 +174,18 @@ EXPORT_SYMBOL(_raw_read_trylock_retry);
 
 void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if (old)
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
@@ -198,16 +196,19 @@ EXPORT_SYMBOL(_raw_write_lock_wait);
 
 void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	unsigned int old;
+	unsigned int owner, old;
 	int count = spin_retry;
 
 	local_irq_restore(flags);
+	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
 		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
 		if (old)
 			continue;
 		local_irq_disable();
@@ -233,3 +234,13 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
 	return 0;
 }
 EXPORT_SYMBOL(_raw_write_trylock_retry);
+
+void arch_lock_relax(unsigned int cpu)
+{
+	if (!cpu)
+		return;
+	if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
+		return;
+	smp_yield_cpu(~cpu);
+}
+EXPORT_SYMBOL(arch_lock_relax);