Diffstat (limited to 'arch')
-rw-r--r--  arch/parisc/Kconfig      |   1
-rw-r--r--  arch/parisc/kernel/smp.c | 134
2 files changed, 22 insertions, 113 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index bc7a19da6245..a7d4fd353c2b 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -199,6 +199,7 @@ endchoice
 
 config SMP
 	bool "Symmetric multi-processing support"
+	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
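Selecting USE_GENERIC_SMP_HELPERS builds the generic cross-CPU call code in
kernel/smp.c, so the architecture no longer carries its own smp_call_function()
implementation; it only has to supply the IPI transport hooks. A sketch of that
contract as this patch uses it (the prototypes are exactly the ones added to
arch/parisc/kernel/smp.c below):

	/* Hooks the generic helpers call to deliver cross-CPU function-call IPIs. */
	void arch_send_call_function_ipi(cpumask_t mask);	/* raises IPI_CALL_FUNC on each CPU in mask */
	void arch_send_call_function_single_ipi(int cpu);	/* raises IPI_CALL_FUNC_SINGLE on one CPU */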
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 85fc7754ec25..126105c76a44 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -84,19 +84,11 @@ EXPORT_SYMBOL(cpu_possible_map);
 
 DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
 
-struct smp_call_struct {
-	void (*func) (void *info);
-	void *info;
-	long wait;
-	atomic_t unstarted_count;
-	atomic_t unfinished_count;
-};
-static volatile struct smp_call_struct *smp_call_function_data;
-
 enum ipi_message_type {
 	IPI_NOP=0,
 	IPI_RESCHEDULE=1,
 	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_START,
 	IPI_CPU_STOP,
 	IPI_CPU_TEST
@@ -187,33 +179,12 @@ ipi_interrupt(int irq, void *dev_id)
 
 		case IPI_CALL_FUNC:
 			smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
-			{
-				volatile struct smp_call_struct *data;
-				void (*func)(void *info);
-				void *info;
-				int wait;
-
-				data = smp_call_function_data;
-				func = data->func;
-				info = data->info;
-				wait = data->wait;
-
-				mb();
-				atomic_dec ((atomic_t *)&data->unstarted_count);
-
-				/* At this point, *data can't
-				 * be relied upon.
-				 */
-
-				(*func)(info);
-
-				/* Notify the sending CPU that the
-				 * task is done.
-				 */
-				mb();
-				if (wait)
-					atomic_dec ((atomic_t *)&data->unfinished_count);
-			}
+			generic_smp_call_function_interrupt();
+			break;
+
+		case IPI_CALL_FUNC_SINGLE:
+			smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
+			generic_smp_call_function_single_interrupt();
 			break;
 
 		case IPI_CPU_START:
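With the hand-rolled smp_call_struct decode gone, the interrupt handler only
dispatches to the generic helpers; queueing, ordering and completion accounting
live in kernel/smp.c. Roughly, the generic dispatcher drains a per-CPU list of
pending calls. The sketch below is a simplified illustration of that idea, not
the real kernel/smp.c code (the struct and function names are made up, and the
real implementation also handles locking and wait/ack flags):

	struct pending_call {			/* illustrative stand-in for the generic call data */
		struct list_head list;
		void (*func)(void *info);
		void *info;
	};

	static void sketch_drain_call_queue(struct list_head *queue)
	{
		struct pending_call *c, *tmp;

		/* Run every function other CPUs queued for this CPU. Runs in
		 * IPI context, so the queued functions must not sleep. */
		list_for_each_entry_safe(c, tmp, queue, list) {
			list_del(&c->list);
			c->func(c->info);
		}
	}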
@@ -256,6 +227,14 @@ ipi_send(int cpu, enum ipi_message_type op)
 	spin_unlock_irqrestore(lock, flags);
 }
 
+static void
+send_IPI_mask(cpumask_t mask, enum ipi_message_type op)
+{
+	int cpu;
+
+	for_each_cpu_mask(cpu, mask)
+		ipi_send(cpu, op);
+}
 
 static inline void
 send_IPI_single(int dest_cpu, enum ipi_message_type op)
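send_IPI_mask() is the fan-out primitive behind the new
arch_send_call_function_ipi(): it issues one ipi_send() per CPU set in the
mask, each of which, as the surrounding context suggests, serializes on the
per-CPU ipi_lock before posting the message. A purely illustrative caller
(the function and variable names below are hypothetical, not from the patch):

	/* Hypothetical example: nudge a set of CPUs with a no-op IPI. */
	static void poke_cpus(cpumask_t targets)
	{
		send_IPI_mask(targets, IPI_NOP);	/* ipi_send() once per CPU in targets */
	}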
@@ -295,86 +274,15 @@ smp_send_all_nop(void)
 	send_IPI_allbutself(IPI_NOP);
 }
 
-
-/**
- * Run a function on all other CPUs.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <retry>	If true, keep retrying until ready.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or have executed.
- */
-
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct smp_call_struct data;
-	unsigned long timeout;
-	static DEFINE_SPINLOCK(lock);
-	int retries = 0;
-
-	if (num_online_cpus() < 2)
-		return 0;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	/* can also deadlock if IPIs are disabled */
-	WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);
-
-
-	data.func = func;
-	data.info = info;
-	data.wait = wait;
-	atomic_set(&data.unstarted_count, num_online_cpus() - 1);
-	atomic_set(&data.unfinished_count, num_online_cpus() - 1);
-
-	if (retry) {
-		spin_lock (&lock);
-		while (smp_call_function_data != 0)
-			barrier();
-	}
-	else {
-		spin_lock (&lock);
-		if (smp_call_function_data) {
-			spin_unlock (&lock);
-			return -EBUSY;
-		}
-	}
-
-	smp_call_function_data = &data;
-	spin_unlock (&lock);
-
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(IPI_CALL_FUNC);
-
- retry:
-	/* Wait for response */
-	timeout = jiffies + HZ;
-	while ( (atomic_read (&data.unstarted_count) > 0) &&
-		time_before (jiffies, timeout) )
-		barrier ();
-
-	if (atomic_read (&data.unstarted_count) > 0) {
-		printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
-		      smp_processor_id(), ++retries);
-		goto retry;
-	}
-	/* We either got one or timed out. Release the lock */
-
-	mb();
-	smp_call_function_data = NULL;
-
-	while (wait && atomic_read (&data.unfinished_count) > 0)
-		barrier ();
-
-	return 0;
+	send_IPI_mask(mask, IPI_CALL_FUNC);
 }
 
-EXPORT_SYMBOL(smp_call_function);
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
+}
 
 /*
  * Flush all other CPU's tlb and then mine. Do this with on_each_cpu()
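After this conversion the public entry points (smp_call_function() and friends)
come from the generic helpers rather than from this file; parisc only supplies
the IPI transport shown above. A hedged usage sketch of the resulting call
path, assuming the three-argument smp_call_function(func, info, wait) form
provided by the generic helpers ("count_hits" and "hits" are made-up names for
illustration):

	/* Runs on every other online CPU, in IPI context: keep it fast, never sleep. */
	static void count_hits(void *info)
	{
		atomic_inc((atomic_t *)info);
	}

	static void example(void)
	{
		atomic_t hits = ATOMIC_INIT(0);

		/* The generic code queues the call data, then invokes
		 * arch_send_call_function_ipi(); each target CPU takes
		 * IPI_CALL_FUNC in ipi_interrupt() and runs count_hits()
		 * via generic_smp_call_function_interrupt(). */
		smp_call_function(count_hits, &hits, 1);	/* wait=1: block until all CPUs ran it */
	}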