| author | Ingo Molnar <mingo@elte.hu> | 2008-07-15 15:55:59 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-07-15 15:55:59 -0400 |
| commit | 1a781a777b2f6ac46523fe92396215762ced624d (patch) | |
| tree | 4f34bb4aade85c0eb364b53d664ec7f6ab959006 /arch/parisc/kernel | |
| parent | b9d2252c1e44fa83a4e65fdc9eb93db6297c55af (diff) | |
| parent | 42a2f217a5e324ed5f2373ab1b7a0a15187c4d6c (diff) | |
Merge branch 'generic-ipi' into generic-ipi-for-linus
Conflicts:
arch/powerpc/Kconfig
arch/s390/kernel/time.c
arch/x86/kernel/apic_32.c
arch/x86/kernel/cpu/perfctr-watchdog.c
arch/x86/kernel/i8259_64.c
arch/x86/kernel/ldt.c
arch/x86/kernel/nmi_64.c
arch/x86/kernel/smpboot.c
arch/x86/xen/smp.c
include/asm-x86/hw_irq_32.h
include/asm-x86/hw_irq_64.h
include/asm-x86/mach-default/irq_vectors.h
include/asm-x86/mach-voyager/irq_vectors.h
include/asm-x86/smp.h
kernel/Makefile
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/parisc/kernel')
| -rw-r--r-- | arch/parisc/kernel/cache.c | 6 |
| -rw-r--r-- | arch/parisc/kernel/smp.c | 136 |
2 files changed, 25 insertions, 117 deletions
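The cache.c hunks below are the mechanical fallout of the interface change this merge carries: on_each_cpu() (like smp_call_function()) loses its old `retry` argument, keeping only the function, its argument, and the `wait` flag. A minimal before/after sketch of a caller; `flush_local` and `flush_everywhere` are illustrative names, not from the patch:

```c
#include <linux/smp.h>

/* Runs on each CPU in IPI context; must be fast and must not sleep. */
static void flush_local(void *unused)
{
	/* per-CPU work goes here */
}

static void flush_everywhere(void)
{
	/* Pre-merge signature: on_each_cpu(flush_local, NULL, 1, 1),
	 * where the third argument was "retry".  Post-merge only
	 * "wait" remains; 1 means block until all CPUs have run. */
	on_each_cpu(flush_local, NULL, 1);
}
```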
```diff
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index e10d25d2d9c..5259d8c2067 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -51,12 +51,12 @@ static struct pdc_btlb_info btlb_info __read_mostly;
 void
 flush_data_cache(void)
 {
-	on_each_cpu(flush_data_cache_local, NULL, 1, 1);
+	on_each_cpu(flush_data_cache_local, NULL, 1);
 }
 void
 flush_instruction_cache(void)
 {
-	on_each_cpu(flush_instruction_cache_local, NULL, 1, 1);
+	on_each_cpu(flush_instruction_cache_local, NULL, 1);
 }
 #endif
 
@@ -515,7 +515,7 @@ static void cacheflush_h_tmp_function(void *dummy)
 
 void flush_cache_all(void)
 {
-	on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
+	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
 }
 
 void flush_cache_mm(struct mm_struct *mm)
```
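The smp.c conversion below deletes parisc's hand-rolled cross-call machinery (the spinlock-serialized `smp_call_struct` with its timeout-and-retry polling) in favor of the generic helpers in kernel/smp.c. What remains for the architecture are two transmit hooks plus interrupt-side dispatch. A condensed sketch of that contract, assuming the generic-ipi API; the `handle_ipi()` skeleton is illustrative, the other names appear in the diff:

```c
#include <linux/smp.h>
#include <linux/cpumask.h>

/* Parisc-local IPI numbering, as defined in the diff below. */
enum ipi_message_type { IPI_CALL_FUNC = 2, IPI_CALL_FUNC_SINGLE };

/* Transmit side: the arch turns these into real IPIs. */
void arch_send_call_function_ipi(cpumask_t mask);	/* -> IPI_CALL_FUNC */
void arch_send_call_function_single_ipi(int cpu);	/* -> IPI_CALL_FUNC_SINGLE */

/* Receive side: the arch IPI handler only dispatches; queueing,
 * waiting, and completion accounting all live in kernel/smp.c. */
static void handle_ipi(enum ipi_message_type op)
{
	switch (op) {
	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;
	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	default:
		break;
	}
}
```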
```diff
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 85fc7754ec2..d47f3975c9c 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -84,19 +84,11 @@ EXPORT_SYMBOL(cpu_possible_map);
 
 DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
 
-struct smp_call_struct {
-	void (*func) (void *info);
-	void *info;
-	long wait;
-	atomic_t unstarted_count;
-	atomic_t unfinished_count;
-};
-static volatile struct smp_call_struct *smp_call_function_data;
-
 enum ipi_message_type {
 	IPI_NOP=0,
 	IPI_RESCHEDULE=1,
 	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_START,
 	IPI_CPU_STOP,
 	IPI_CPU_TEST
@@ -187,33 +179,12 @@ ipi_interrupt(int irq, void *dev_id)
 
 		case IPI_CALL_FUNC:
 			smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
-			{
-				volatile struct smp_call_struct *data;
-				void (*func)(void *info);
-				void *info;
-				int wait;
-
-				data = smp_call_function_data;
-				func = data->func;
-				info = data->info;
-				wait = data->wait;
-
-				mb();
-				atomic_dec ((atomic_t *)&data->unstarted_count);
-
-				/* At this point, *data can't
-				 * be relied upon.
-				 */
-
-				(*func)(info);
-
-				/* Notify the sending CPU that the
-				 * task is done.
-				 */
-				mb();
-				if (wait)
-					atomic_dec ((atomic_t *)&data->unfinished_count);
-			}
+			generic_smp_call_function_interrupt();
+			break;
+
+		case IPI_CALL_FUNC_SINGLE:
+			smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
+			generic_smp_call_function_single_interrupt();
 			break;
 
 		case IPI_CPU_START:
@@ -256,6 +227,14 @@ ipi_send(int cpu, enum ipi_message_type op)
 	spin_unlock_irqrestore(lock, flags);
 }
 
+static void
+send_IPI_mask(cpumask_t mask, enum ipi_message_type op)
+{
+	int cpu;
+
+	for_each_cpu_mask(cpu, mask)
+		ipi_send(cpu, op);
+}
 
 static inline void
 send_IPI_single(int dest_cpu, enum ipi_message_type op)
@@ -295,86 +274,15 @@ smp_send_all_nop(void)
 	send_IPI_allbutself(IPI_NOP);
 }
 
-
-/**
- * Run a function on all other CPUs.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <retry>	If true, keep retrying until ready.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]   0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or have executed.
- */
-
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct smp_call_struct data;
-	unsigned long timeout;
-	static DEFINE_SPINLOCK(lock);
-	int retries = 0;
-
-	if (num_online_cpus() < 2)
-		return 0;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	/* can also deadlock if IPIs are disabled */
-	WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);
-
-
-	data.func = func;
-	data.info = info;
-	data.wait = wait;
-	atomic_set(&data.unstarted_count, num_online_cpus() - 1);
-	atomic_set(&data.unfinished_count, num_online_cpus() - 1);
-
-	if (retry) {
-		spin_lock (&lock);
-		while (smp_call_function_data != 0)
-			barrier();
-	}
-	else {
-		spin_lock (&lock);
-		if (smp_call_function_data) {
-			spin_unlock (&lock);
-			return -EBUSY;
-		}
-	}
-
-	smp_call_function_data = &data;
-	spin_unlock (&lock);
-
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(IPI_CALL_FUNC);
-
- retry:
-	/* Wait for response */
-	timeout = jiffies + HZ;
-	while ( (atomic_read (&data.unstarted_count) > 0) &&
-		time_before (jiffies, timeout) )
-		barrier ();
-
-	if (atomic_read (&data.unstarted_count) > 0) {
-		printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
-		      smp_processor_id(), ++retries);
-		goto retry;
-	}
-	/* We either got one or timed out. Release the lock */
-
-	mb();
-	smp_call_function_data = NULL;
-
-	while (wait && atomic_read (&data.unfinished_count) > 0)
-		barrier ();
-
-	return 0;
+	send_IPI_mask(mask, IPI_CALL_FUNC);
 }
 
-EXPORT_SYMBOL(smp_call_function);
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
+}
 
 /*
  * Flush all other CPU's tlb and then mine.  Do this with on_each_cpu()
@@ -384,7 +292,7 @@ EXPORT_SYMBOL(smp_call_function);
 void
 smp_flush_tlb_all(void)
 {
-	on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
+	on_each_cpu(flush_tlb_all_local, NULL, 1);
 }
 
 /*
```
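End to end, generic code now reaches the hooks added above roughly as follows. A hedged usage sketch; `remote_work` and `poke_cpu` are hypothetical names, not part of the patch:

```c
#include <linux/smp.h>

/* Runs on the target CPU in hard-IRQ context; must not sleep. */
static void remote_work(void *info)
{
	/* ... */
}

static void poke_cpu(int cpu)
{
	/* kernel/smp.c queues the request, then calls
	 * arch_send_call_function_single_ipi(cpu); the target's
	 * ipi_interrupt() sees IPI_CALL_FUNC_SINGLE and calls
	 * generic_smp_call_function_single_interrupt(), which runs
	 * remote_work(NULL).  wait == 1 spins until it completes. */
	smp_call_function_single(cpu, remote_work, NULL, 1);
}
```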
