author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2012-03-11 11:59:26 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2012-03-11 11:59:28 -0400
commit		8b646bd759086f6090fe27acf414c0b5faa737f4
tree		29475659031c57ccf2ca43899614ab5c6b1899a0 /arch/s390/lib
parent		7e180bd8020d213bb0de15c3606968f8a9262439
[S390] rework smp code
Define struct pcpu and merge some of the NR_CPUS arrays into it, including
__cpu_logical_map, current_set and smp_cpu_state. Split smp related
functions to those operating on physical cpus and the functions operating
on a logical cpu number. Make the functions for physical cpus use a
pointer to a struct pcpu. This hides the knowledge about cpu addresses in
smp.c, entry[64].S and swsusp_asm64.S, so the sigp.h header can be removed.
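As a rough illustration of this consolidation, a minimal sketch follows; the
field and helper names are assumptions chosen to mirror the arrays named
above, not the commit's exact definitions:

/*
 * Sketch only: one array of struct pcpu, indexed by logical cpu number,
 * replaces the former per-cpu arrays; the physical cpu address is kept
 * inside the struct instead of in __cpu_logical_map[].
 */
struct pcpu {
	struct _lowcore *lowcore;	/* prefix (lowcore) page of this cpu */
	unsigned long async_stack;	/* async interrupt stack */
	unsigned long panic_stack;	/* machine check / panic stack */
	int state;			/* takes over smp_cpu_state[] */
	u16 address;			/* physical cpu address, takes over __cpu_logical_map[] */
};

static struct pcpu pcpu_devices[NR_CPUS];

/* Functions on physical cpus take a struct pcpu *, hiding the cpu address... */
static int pcpu_stopped(struct pcpu *pcpu);

/* ...while functions on logical cpu numbers simply index pcpu_devices. */
static inline struct pcpu *pcpu_of(int cpu)
{
	return pcpu_devices + cpu;
}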
The PSW restart mechanism is used to start secondary cpus, to call a
function on an online cpu, to call a function on the ipl cpu, and for
the nmi signal. The different assembler functions are replaced with a
single function, restart_int_handler. The new entry point calls a function
whose pointer is stored in the lowcore of the target cpu, and it can wait
for the source cpu to stop. This covers all existing use cases.
Overall the code is now simpler and contains ~380 fewer lines.
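To make the delegation mechanism above concrete, a minimal sketch; the
helper name pcpu_delegate and the lowcore field names are assumptions used
for illustration, not necessarily the commit's exact code:

/*
 * Sketch: the source cpu publishes a function pointer and argument in the
 * target cpu's lowcore and raises PSW restart on it; the single entry
 * point restart_int_handler then performs the call.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct _lowcore *lc = pcpu->lowcore;

	/* Publish the work for the target cpu in its lowcore. */
	lc->restart_stack = stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	/* If the source cpu must stop, its address goes here; -1 means "don't wait". */
	lc->restart_source = -1UL;
	/*
	 * Raise PSW restart on the target cpu; restart_int_handler loads
	 * restart_stack, calls *restart_fn(restart_data) and, if
	 * restart_source is set, waits for that cpu to enter stopped state.
	 */
	pcpu_sigp_retry(pcpu, sigp_restart, 0);
}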
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/lib')
 arch/s390/lib/spinlock.c | 30 ++++++++----------------------
 1 file changed, 8 insertions(+), 22 deletions(-)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 91754ffb9203..093eb694d9c1 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/smp.h>
 #include <asm/io.h>
 
 int spin_retry = 1000;
@@ -24,21 +25,6 @@ static int __init spin_retry_setup(char *str)
 }
 __setup("spin_retry=", spin_retry_setup);
 
-static inline void _raw_yield(void)
-{
-	if (MACHINE_HAS_DIAG44)
-		asm volatile("diag 0,0,0x44");
-}
-
-static inline void _raw_yield_cpu(int cpu)
-{
-	if (MACHINE_HAS_DIAG9C)
-		asm volatile("diag %0,0,0x9c"
-			     : : "d" (cpu_logical_map(cpu)));
-	else
-		_raw_yield();
-}
-
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
@@ -60,7 +46,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 		}
 		owner = lp->owner_cpu;
 		if (owner)
-			_raw_yield_cpu(~owner);
+			smp_yield_cpu(~owner);
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 	}
@@ -91,7 +77,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		}
 		owner = lp->owner_cpu;
 		if (owner)
-			_raw_yield_cpu(~owner);
+			smp_yield_cpu(~owner);
 		local_irq_disable();
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
@@ -121,7 +107,7 @@ void arch_spin_relax(arch_spinlock_t *lock)
 	if (cpu != 0) {
 		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
 		    !smp_vcpu_scheduled(~cpu))
-			_raw_yield_cpu(~cpu);
+			smp_yield_cpu(~cpu);
 	}
 }
 EXPORT_SYMBOL(arch_spin_relax);
@@ -133,7 +119,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_read_can_lock(rw))
@@ -153,7 +139,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 	local_irq_restore(flags);
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_read_can_lock(rw))
@@ -188,7 +174,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_write_can_lock(rw))
@@ -206,7 +192,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 	local_irq_restore(flags);
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_write_can_lock(rw))
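The _raw_yield()/_raw_yield_cpu() helpers removed above are not lost: their
diagnose-based implementation moves behind smp_yield()/smp_yield_cpu() in
the reworked smp code. A sketch modelled directly on the removed lines (the
pcpu_devices[cpu].address lookup is an assumption tied to the struct pcpu
sketch earlier; the real functions may differ in detail):

void smp_yield(void)
{
	/* diag 0x44: voluntarily give up the remainder of the time slice */
	if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		/* diag 0x9c: yield in favor of a specific physical cpu */
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	else
		smp_yield();
}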