author    Jens Axboe <jens.axboe@oracle.com>    2008-06-10 14:47:29 -0400
committer Jens Axboe <jens.axboe@oracle.com>    2008-06-26 05:22:57 -0400
commit    c524a1d8914408fd57241d9542fa2d402f004a33 (patch)
tree      2779a09788c750405c53eff8fa6c967985c3559c /arch/alpha/kernel
parent    f27b433ef32a77c8cb76f018507453df7c03e552 (diff)
alpha: convert to generic helpers for IPI function calls
This converts alpha to use the new helpers for smp_call_function() and friends, and adds support for smp_call_function_single().

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
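For orientation, here is a minimal sketch of the caller-visible API this conversion targets. The argument order is confirmed by the core_marvel.c hunk below; the trailing (1, 1) pair is this era's nonatomic/wait arguments (dropped in later kernels), so treat the exact prototypes as era-specific:

	#include <linux/smp.h>

	/* Callback: runs on the remote CPU in interrupt context, so it
	 * must be fast and must not block. */
	static void do_remote_work(void *info)
	{
	}

	static void example(void)
	{
		/* Run on all other online CPUs, wait for completion. */
		smp_call_function(do_remote_work, NULL, 1, 1);

		/* Run on one specific CPU (alpha's boot CPU here). */
		smp_call_function_single(boot_cpuid, do_remote_work, NULL, 1, 1);
	}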
Diffstat (limited to 'arch/alpha/kernel')
-rw-r--r--  arch/alpha/kernel/core_marvel.c |   6
-rw-r--r--  arch/alpha/kernel/smp.c         | 170
2 files changed, 13 insertions(+), 163 deletions(-)
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index b04f1feb1dda..ced4aae8b804 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -660,9 +660,9 @@ __marvel_rtc_io(u8 b, unsigned long addr, int write)
 
 #ifdef CONFIG_SMP
 	if (smp_processor_id() != boot_cpuid)
-		smp_call_function_on_cpu(__marvel_access_rtc,
-					 &rtc_access, 1, 1,
-					 cpumask_of_cpu(boot_cpuid));
+		smp_call_function_single(boot_cpuid,
+					 __marvel_access_rtc,
+					 &rtc_access, 1, 1);
 	else
 		__marvel_access_rtc(&rtc_access);
 #else
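The call-site change above is mechanical: smp_call_function_on_cpu() took a cpumask and ran the function on every CPU in it, whereas smp_call_function_single() takes the target CPU number directly, so the cpumask_of_cpu(boot_cpuid) construction disappears and boot_cpuid moves to the first argument.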
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 2525692db0ab..95c905be9154 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -62,6 +62,7 @@ static struct {
 enum ipi_message_type {
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
 };
 
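The new IPI_CALL_FUNC_SINGLE message type is the only extra per-architecture state the single-CPU path needs; the handle_ipi() hunk further down decodes it and hands straight off to the generic code.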
@@ -558,51 +559,6 @@ send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
 		wripir(i);
 }
 
-/* Structure and data for smp_call_function.  This is designed to
-   minimize static memory requirements.  Plus it looks cleaner.  */
-
-struct smp_call_struct {
-	void (*func) (void *info);
-	void *info;
-	long wait;
-	atomic_t unstarted_count;
-	atomic_t unfinished_count;
-};
-
-static struct smp_call_struct *smp_call_function_data;
-
-/* Atomicly drop data into a shared pointer.  The pointer is free if
-   it is initially locked.  If retry, spin until free.  */
-
-static int
-pointer_lock (void *lock, void *data, int retry)
-{
-	void *old, *tmp;
-
-	mb();
- again:
-	/* Compare and swap with zero.  */
-	asm volatile (
-	"1:	ldq_l	%0,%1\n"
-	"	mov	%3,%2\n"
-	"	bne	%0,2f\n"
-	"	stq_c	%2,%1\n"
-	"	beq	%2,1b\n"
-	"2:"
-	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
-	: "r"(data)
-	: "memory");
-
-	if (old == 0)
-		return 0;
-	if (! retry)
-		return -EBUSY;
-
-	while (*(void **)lock)
-		barrier();
-	goto again;
-}
-
 void
 handle_ipi(struct pt_regs *regs)
 {
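The removed pointer_lock() above is a hand-rolled compare-and-swap built from alpha's load-locked/store-conditional pair (ldq_l/stq_c): it claims the shared smp_call_function_data slot by swapping it from zero, optionally spinning until the slot frees. As an illustration only (plain C with a compiler builtin standing in for the ll/sc sequence; not kernel code and not part of this patch), the same protocol looks like:

	#include <stddef.h>

	/* Claim *lock by atomically swapping it from NULL to data.
	 * Returns 0 on success; if retry is false, fail with -1 (the
	 * kernel code's -EBUSY case) when the slot is already taken,
	 * otherwise spin until the current owner clears it and retry. */
	static int pointer_lock_sketch(void **lock, void *data, int retry)
	{
		for (;;) {
			void *old = __sync_val_compare_and_swap(lock, NULL, data);
			if (old == NULL)
				return 0;		/* slot claimed */
			if (!retry)
				return -1;
			while (*(void * volatile *)lock)
				;			/* wait for release */
		}
	}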
@@ -632,31 +588,12 @@ handle_ipi(struct pt_regs *regs)
 			break;
 
 		case IPI_CALL_FUNC:
-		    {
-			struct smp_call_struct *data;
-			void (*func)(void *info);
-			void *info;
-			int wait;
-
-			data = smp_call_function_data;
-			func = data->func;
-			info = data->info;
-			wait = data->wait;
-
-			/* Notify the sending CPU that the data has been
-			   received, and execution is about to begin.  */
-			mb();
-			atomic_dec (&data->unstarted_count);
-
-			/* At this point the structure may be gone unless
-			   wait is true.  */
-			(*func)(info);
-
-			/* Notify the sending CPU that the task is done.  */
-			mb();
-			if (wait) atomic_dec (&data->unfinished_count);
+			generic_smp_call_function_interrupt();
+			break;
+
+		case IPI_CALL_FUNC_SINGLE:
+			generic_smp_call_function_single_interrupt();
 			break;
-		    }
 
 		case IPI_CPU_STOP:
 			halt();
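With the generic helpers, the architecture no longer walks a private smp_call_struct in its IPI handler: generic_smp_call_function_interrupt() and generic_smp_call_function_single_interrupt() (kernel/smp.c) dequeue the pending call data and run the function, so the mb()/atomic_dec() handshake with the sender moves out of arch code entirely.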
@@ -700,102 +637,15 @@ smp_send_stop(void)
 	send_ipi_message(to_whom, IPI_CPU_STOP);
 }
 
-/*
- * Run a function on all other CPUs.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <retry>	If true, keep retrying until ready.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-
-int
-smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
-			  int wait, cpumask_t to_whom)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct smp_call_struct data;
-	unsigned long timeout;
-	int num_cpus_to_call;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	data.wait = wait;
-
-	cpu_clear(smp_processor_id(), to_whom);
-	num_cpus_to_call = cpus_weight(to_whom);
-
-	atomic_set(&data.unstarted_count, num_cpus_to_call);
-	atomic_set(&data.unfinished_count, num_cpus_to_call);
-
-	/* Acquire the smp_call_function_data mutex.  */
-	if (pointer_lock(&smp_call_function_data, &data, retry))
-		return -EBUSY;
-
-	/* Send a message to the requested CPUs.  */
-	send_ipi_message(to_whom, IPI_CALL_FUNC);
-
-	/* Wait for a minimal response.  */
-	timeout = jiffies + HZ;
-	while (atomic_read (&data.unstarted_count) > 0
-	       && time_before (jiffies, timeout))
-		barrier();
-
-	/* If there's no response yet, log a message but allow a longer
-	 * timeout period -- if we get a response this time, log
-	 * a message saying when we got it..
-	 */
-	if (atomic_read(&data.unstarted_count) > 0) {
-		long start_time = jiffies;
-		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
-		       __func__);
-		timeout = jiffies + 30 * HZ;
-		while (atomic_read(&data.unstarted_count) > 0
-		       && time_before(jiffies, timeout))
-			barrier();
-		if (atomic_read(&data.unstarted_count) <= 0) {
-			long delta = jiffies - start_time;
-			printk(KERN_ERR
-			       "%s: response %ld.%ld seconds into long wait\n",
-			       __func__, delta / HZ,
-			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
-		}
-	}
-
-	/* We either got one or timed out -- clear the lock.  */
-	mb();
-	smp_call_function_data = NULL;
-
-	/*
-	 * If after both the initial and long timeout periods we still don't
-	 * have a response, something is very wrong...
-	 */
-	BUG_ON(atomic_read (&data.unstarted_count) > 0);
-
-	/* Wait for a complete response, if needed.  */
-	if (wait) {
-		while (atomic_read (&data.unfinished_count) > 0)
-			barrier();
-	}
-
-	return 0;
+	send_ipi_message(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL(smp_call_function_on_cpu);
 
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	return smp_call_function_on_cpu (func, info, retry, wait,
-					 cpu_online_map);
+	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL(smp_call_function);
 
 static void
 ipi_imb(void *ignored)
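With the two arch_send_* hooks above plus the two handle_ipi() cases, alpha's entire contribution to the cross-call path is sending and acknowledging the IPI; queueing, waiting, and the lifetime of the call data now live in the generic kernel/smp.c (enabled by the generic-helpers Kconfig option, USE_GENERIC_SMP_HELPERS in trees of this era). As a hypothetical usage sketch (not part of the patch), a broadcast icache flush built on alpha's existing ipi_imb() callback from the trailing context would flow through this path:

	/* Hypothetical caller: flush the icache locally, then on all other
	 * CPUs via the generic cross-call path.  The (1, 1) pair is this
	 * era's nonatomic/wait arguments. */
	static void flush_icache_everywhere(void)
	{
		imb();				/* alpha: local icache flush */
		smp_call_function(ipi_imb, NULL, 1, 1);
	}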