aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/lib
diff options
context:
space:
mode:
Diffstat (limited to 'arch/s390/lib')
-rw-r--r--arch/s390/lib/spinlock.c30
1 file changed, 8 insertions, 22 deletions
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 91754ffb9203..093eb694d9c1 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/smp.h>
 #include <asm/io.h>
 
 int spin_retry = 1000;
@@ -24,21 +25,6 @@ static int __init spin_retry_setup(char *str)
 }
 __setup("spin_retry=", spin_retry_setup);
 
-static inline void _raw_yield(void)
-{
-	if (MACHINE_HAS_DIAG44)
-		asm volatile("diag 0,0,0x44");
-}
-
-static inline void _raw_yield_cpu(int cpu)
-{
-	if (MACHINE_HAS_DIAG9C)
-		asm volatile("diag %0,0,0x9c"
-			     : : "d" (cpu_logical_map(cpu)));
-	else
-		_raw_yield();
-}
-
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
@@ -60,7 +46,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 		}
 		owner = lp->owner_cpu;
 		if (owner)
-			_raw_yield_cpu(~owner);
+			smp_yield_cpu(~owner);
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 	}
@@ -91,7 +77,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		}
 		owner = lp->owner_cpu;
 		if (owner)
-			_raw_yield_cpu(~owner);
+			smp_yield_cpu(~owner);
 		local_irq_disable();
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
@@ -121,7 +107,7 @@ void arch_spin_relax(arch_spinlock_t *lock)
 	if (cpu != 0) {
 		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
 		    !smp_vcpu_scheduled(~cpu))
-			_raw_yield_cpu(~cpu);
+			smp_yield_cpu(~cpu);
 	}
 }
 EXPORT_SYMBOL(arch_spin_relax);
@@ -133,7 +119,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_read_can_lock(rw))
@@ -153,7 +139,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 	local_irq_restore(flags);
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_read_can_lock(rw))
@@ -188,7 +174,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_write_can_lock(rw))
@@ -206,7 +192,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 	local_irq_restore(flags);
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_write_can_lock(rw))