author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2015-11-19 05:09:45 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2015-11-27 03:24:18 -0500
commit		419123f900dac58fb27ce5285b21074f5300095a (patch)
tree		d0b0d72691d4e2e3df0e9f4d2e335bebc378fdeb /arch/s390/lib/spinlock.c
parent		c6f70d3b8a32fdec60d3f78cb59423f056f16688 (diff)
s390/spinlock: do not yield to a CPU in udelay/mdelay
It does not make sense to try to relinquish the time slice with diag 0x9c to a CPU that is in a state in which it cannot be scheduled. The scenario where this can happen is a CPU waiting in udelay/mdelay while holding a spinlock.

Add a CIF bit to tag a CPU in enabled wait and use it to detect that yielding to that CPU will not be successful; in that case skip the diagnose call.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
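For context: the code that sets the new CIF bit lives outside the excerpt below, which is limited to spinlock.c. The following is a minimal illustrative sketch of that idea, assuming the s390 set_cpu_flag()/clear_cpu_flag() helpers; the function name and the exact call site in the real series (the enabled-wait path used by udelay/mdelay) are assumptions, not a literal hunk of this patch.

/*
 * Illustrative sketch only -- not part of the diff shown below.
 * A CPU that is about to sit in an enabled wait (as udelay/mdelay do)
 * tags itself, so spinning CPUs can tell that yielding to it via
 * diag 0x9c cannot help: it is not in a state that can be scheduled.
 */
static void sketch_enabled_wait(void)		/* hypothetical name */
{
	set_cpu_flag(CIF_ENABLED_WAIT);		/* "I am in enabled wait" */

	/* ... load an enabled wait PSW and wait for an interrupt ... */

	clear_cpu_flag(CIF_ENABLED_WAIT);	/* back to normal execution */
}

With the tag in place, the new cpu_is_preempted() helper below reports such a CPU as not preempted (test_cpu_flag_of() queries the flag of the remote CPU), so arch_spin_lock_wait(), arch_spin_lock_wait_flags() and the rwlock slow paths skip the smp_yield_cpu() diagnose for it.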
Diffstat (limited to 'arch/s390/lib/spinlock.c')
-rw-r--r--	arch/s390/lib/spinlock.c	25
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 0a68fe04a9e1..d4549c964589 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -37,6 +37,15 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
 	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
 }
 
+static inline int cpu_is_preempted(int cpu)
+{
+	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
+		return 0;
+	if (smp_vcpu_scheduled(cpu))
+		return 0;
+	return 1;
+}
+
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -53,7 +62,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 			continue;
 		}
 		/* First iteration: check if the lock owner is running. */
-		if (first_diag && !smp_vcpu_scheduled(~owner)) {
+		if (first_diag && cpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 			continue;
@@ -72,7 +81,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 		 * yield the CPU unconditionally. For LPAR rely on the
 		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR || !smp_vcpu_scheduled(~owner)) {
+		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 		}
@@ -98,7 +107,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 			local_irq_restore(flags);
 		}
 		/* Check if the lock owner is running. */
-		if (first_diag && !smp_vcpu_scheduled(~owner)) {
+		if (first_diag && cpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 			continue;
@@ -117,7 +126,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		 * yield the CPU unconditionally. For LPAR rely on the
 		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR || !smp_vcpu_scheduled(~owner)) {
+		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 		}
@@ -155,7 +164,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && !smp_vcpu_scheduled(~owner))
+			if (owner && cpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -201,7 +210,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && !smp_vcpu_scheduled(~owner))
+			if (owner && cpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -231,7 +240,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && !smp_vcpu_scheduled(~owner))
+			if (owner && cpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -275,7 +284,7 @@ void arch_lock_relax(unsigned int cpu)
 {
 	if (!cpu)
 		return;
-	if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
+	if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
 		return;
 	smp_yield_cpu(~cpu);
 }