author     Ingo Molnar <mingo@elte.hu>    2008-07-15 15:55:59 -0400
committer  Ingo Molnar <mingo@elte.hu>    2008-07-15 15:55:59 -0400
commit     1a781a777b2f6ac46523fe92396215762ced624d (patch)
tree       4f34bb4aade85c0eb364b53d664ec7f6ab959006 /arch/arm
parent     b9d2252c1e44fa83a4e65fdc9eb93db6297c55af (diff)
parent     42a2f217a5e324ed5f2373ab1b7a0a15187c4d6c (diff)
Merge branch 'generic-ipi' into generic-ipi-for-linus
Conflicts:

        arch/powerpc/Kconfig
        arch/s390/kernel/time.c
        arch/x86/kernel/apic_32.c
        arch/x86/kernel/cpu/perfctr-watchdog.c
        arch/x86/kernel/i8259_64.c
        arch/x86/kernel/ldt.c
        arch/x86/kernel/nmi_64.c
        arch/x86/kernel/smpboot.c
        arch/x86/xen/smp.c
        include/asm-x86/hw_irq_32.h
        include/asm-x86/hw_irq_64.h
        include/asm-x86/mach-default/irq_vectors.h
        include/asm-x86/mach-voyager/irq_vectors.h
        include/asm-x86/smp.h
        kernel/Makefile

Signed-off-by: Ingo Molnar <mingo@elte.hu>
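The substance of this merge, for arch/arm, is the switch to the kernel's generic SMP cross-call helpers: the hand-rolled smp_call_function()/smp_call_function_single() in arch/arm/kernel/smp.c are deleted, ARM selects USE_GENERIC_SMP_HELPERS, and only the two architecture hooks for raising IPIs remain. The one change visible to callers is that smp_call_function() loses its `retry` argument (which the removed ARM implementation accepted but never acted on). A minimal sketch of the call-site change, using a hypothetical handler `my_func` and argument `my_arg` that are not from this diff:

        /* before this merge: func, info, retry, wait */
        smp_call_function(my_func, my_arg, 1, 1);

        /* after: func, info, wait -- with wait == 1, blocks until all
         * other online CPUs have run my_func */
        smp_call_function(my_func, my_arg, 1);

Every call site in this diff (oprofile, VFP) changes in exactly this way.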
Diffstat (limited to 'arch/arm')
-rw-r--r--    arch/arm/Kconfig                        1
-rw-r--r--    arch/arm/kernel/smp.c                   163
-rw-r--r--    arch/arm/oprofile/op_model_mpcore.c     2
-rw-r--r--    arch/arm/vfp/vfpmodule.c                2
4 files changed, 21 insertions(+), 147 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 258f1369fb0c..c7ad324ddf2c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -701,6 +701,7 @@ source "kernel/time/Kconfig"
 config SMP
         bool "Symmetric Multi-Processing (EXPERIMENTAL)"
         depends on EXPERIMENTAL && (REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP)
+        select USE_GENERIC_SMP_HELPERS
         help
           This enables support for systems with more than one CPU. If you have
           a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index eefae1de334c..5a7c09564d13 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -68,20 +68,10 @@ enum ipi_msg_type {
         IPI_TIMER,
         IPI_RESCHEDULE,
         IPI_CALL_FUNC,
+        IPI_CALL_FUNC_SINGLE,
         IPI_CPU_STOP,
 };
 
-struct smp_call_struct {
-        void (*func)(void *info);
-        void *info;
-        int wait;
-        cpumask_t pending;
-        cpumask_t unfinished;
-};
-
-static struct smp_call_struct * volatile smp_call_function_data;
-static DEFINE_SPINLOCK(smp_call_function_lock);
-
 int __cpuinit __cpu_up(unsigned int cpu)
 {
         struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
@@ -366,114 +356,15 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
         local_irq_restore(flags);
 }
 
-/*
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler, nor from a bottom half handler.
- */
-static int smp_call_function_on_cpu(void (*func)(void *info), void *info,
-                                    int retry, int wait, cpumask_t callmap)
-{
-        struct smp_call_struct data;
-        unsigned long timeout;
-        int ret = 0;
-
-        data.func = func;
-        data.info = info;
-        data.wait = wait;
-
-        cpu_clear(smp_processor_id(), callmap);
-        if (cpus_empty(callmap))
-                goto out;
-
-        data.pending = callmap;
-        if (wait)
-                data.unfinished = callmap;
-
-        /*
-         * try to get the mutex on smp_call_function_data
-         */
-        spin_lock(&smp_call_function_lock);
-        smp_call_function_data = &data;
-
-        send_ipi_message(callmap, IPI_CALL_FUNC);
-
-        timeout = jiffies + HZ;
-        while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
-                barrier();
-
-        /*
-         * did we time out?
-         */
-        if (!cpus_empty(data.pending)) {
-                /*
-                 * this may be causing our panic - report it
-                 */
-                printk(KERN_CRIT
-                       "CPU%u: smp_call_function timeout for %p(%p)\n"
-                       "      callmap %lx pending %lx, %swait\n",
-                       smp_processor_id(), func, info, *cpus_addr(callmap),
-                       *cpus_addr(data.pending), wait ? "" : "no ");
-
-                /*
-                 * TRACE
-                 */
-                timeout = jiffies + (5 * HZ);
-                while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
-                        barrier();
-
-                if (cpus_empty(data.pending))
-                        printk(KERN_CRIT "     RESOLVED\n");
-                else
-                        printk(KERN_CRIT "     STILL STUCK\n");
-        }
-
-        /*
-         * whatever happened, we're done with the data, so release it
-         */
-        smp_call_function_data = NULL;
-        spin_unlock(&smp_call_function_lock);
-
-        if (!cpus_empty(data.pending)) {
-                ret = -ETIMEDOUT;
-                goto out;
-        }
-
-        if (wait)
-                while (!cpus_empty(data.unfinished))
-                        barrier();
- out:
-
-        return 0;
-}
-
-int smp_call_function(void (*func)(void *info), void *info, int retry,
-                      int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-        return smp_call_function_on_cpu(func, info, retry, wait,
-                                        cpu_online_map);
+        send_ipi_message(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL_GPL(smp_call_function);
 
-int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
-                             int retry, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-        /* prevent preemption and reschedule on another processor */
-        int current_cpu = get_cpu();
-        int ret = 0;
-
-        if (cpu == current_cpu) {
-                local_irq_disable();
-                func(info);
-                local_irq_enable();
-        } else
-                ret = smp_call_function_on_cpu(func, info, retry, wait,
-                                               cpumask_of_cpu(cpu));
-
-        put_cpu();
-
-        return ret;
+        send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void show_ipi_list(struct seq_file *p)
 {
@@ -521,27 +412,6 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs)
 }
 #endif
 
-/*
- * ipi_call_function - handle IPI from smp_call_function()
- *
- * Note that we copy data out of the cross-call structure and then
- * let the caller know that we're here and have done with their data
- */
-static void ipi_call_function(unsigned int cpu)
-{
-        struct smp_call_struct *data = smp_call_function_data;
-        void (*func)(void *info) = data->func;
-        void *info = data->info;
-        int wait = data->wait;
-
-        cpu_clear(cpu, data->pending);
-
-        func(info);
-
-        if (wait)
-                cpu_clear(cpu, data->unfinished);
-}
-
 static DEFINE_SPINLOCK(stop_lock);
 
 /*
@@ -611,7 +481,11 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs)
                         break;
 
                 case IPI_CALL_FUNC:
-                        ipi_call_function(cpu);
+                        generic_smp_call_function_interrupt();
+                        break;
+
+                case IPI_CALL_FUNC_SINGLE:
+                        generic_smp_call_function_single_interrupt();
                         break;
 
                 case IPI_CPU_STOP:
@@ -662,14 +536,13 @@ int setup_profiling_timer(unsigned int multiplier)
 }
 
 static int
-on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
-                 cpumask_t mask)
+on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
 {
         int ret = 0;
 
         preempt_disable();
 
-        ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
+        ret = smp_call_function_mask(mask, func, info, wait);
         if (cpu_isset(smp_processor_id(), mask))
                 func(info);
 
@@ -731,14 +604,14 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 
 void flush_tlb_all(void)
 {
-        on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
+        on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
         cpumask_t mask = mm->cpu_vm_mask;
 
-        on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
+        on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -749,7 +622,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
         ta.ta_vma = vma;
         ta.ta_start = uaddr;
 
-        on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
+        on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -758,7 +631,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
 
         ta.ta_start = kaddr;
 
-        on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
+        on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
@@ -771,7 +644,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
         ta.ta_start = start;
         ta.ta_end = end;
 
-        on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
+        on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -781,5 +654,5 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
         ta.ta_start = start;
         ta.ta_end = end;
 
-        on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
+        on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
 }
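Taken together, the smp.c changes reduce the architecture's obligation to two tiny IPI-sending hooks plus two dispatch cases in do_IPI(); the queueing, waiting, and timeout bookkeeping that the deleted code did by hand (the pending/unfinished cpumasks and the single shared smp_call_function_data slot) now lives once in kernel/smp.c. A rough sketch of the resulting division of labour -- heavily simplified from the generic helpers, not code taken from this diff:

        /* generic layer (kernel/smp.c), simplified sketch: publish the
         * request, then ask the architecture to raise the IPI */
        int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
                                   void *info, int wait)
        {
                /* ... queue a call-function record for the CPUs in mask ... */
                arch_send_call_function_ipi(mask);  /* ARM hook added above */
                /* ... if (wait), spin until every target CPU ran func ... */
                return 0;
        }

        /* receive side: ARM's do_IPI() decodes IPI_CALL_FUNC and calls
         * generic_smp_call_function_interrupt(), which walks the queue
         * and runs each pending func(info) on the current CPU */

One consequence is already folded into on_each_cpu_mask() above: it now delegates to smp_call_function_mask(), so its `retry` parameter disappears as well.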
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 74fae6045650..4458705021e0 100644
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ b/arch/arm/oprofile/op_model_mpcore.c
@@ -201,7 +201,7 @@ static int em_call_function(int (*fn)(void))
         data.ret = 0;
 
         preempt_disable();
-        smp_call_function(em_func, &data, 1, 1);
+        smp_call_function(em_func, &data, 1);
         em_func(&data);
         preempt_enable();
 
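The pattern at this call site is worth spelling out, since smp_call_function() only targets the *other* online CPUs: to run em_func everywhere, the caller invokes it locally as well, and disables preemption so the task cannot migrate between the cross-call and the local call (which could run em_func twice on one CPU and never on another). Schematically, with fn/arg standing in for any handler and argument:

        preempt_disable();              /* pin this task to the current CPU */
        smp_call_function(fn, arg, 1);  /* wait == 1: all other CPUs run fn */
        fn(arg);                        /* cover the calling CPU ourselves */
        preempt_enable();

This is essentially what the on_each_cpu() helper used by the TLB-flush paths above packages up.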
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 32455c633f1c..c0d2c9bb952b 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -352,7 +352,7 @@ static int __init vfp_init(void)
         else if (vfpsid & FPSID_NODOUBLE) {
                 printk("no double precision support\n");
         } else {
-                smp_call_function(vfp_enable, NULL, 1, 1);
+                smp_call_function(vfp_enable, NULL, 1);
 
                 VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
                 printk("implementor %02x architecture %d part %02x variant %x rev %x\n",