-rw-r--r--  arch/m32r/Kconfig              |   1
-rw-r--r--  arch/m32r/kernel/m32r_ksyms.c  |   3
-rw-r--r--  arch/m32r/kernel/smp.c         | 128
-rw-r--r--  arch/m32r/kernel/traps.c       |   3
-rw-r--r--  include/asm-m32r/smp.h         |   4
5 files changed, 20 insertions(+), 119 deletions(-)
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index de153de2ea9f..a5f864c445b2 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -296,6 +296,7 @@ config PREEMPT
 
 config SMP
 	bool "Symmetric multi-processing support"
+	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
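
Selecting USE_GENERIC_SMP_HELPERS builds the common cross-call code in kernel/smp.c, which handles queueing, locking and completion itself and only asks the architecture for two IPI send hooks plus interrupt handlers that call back into it. As a rough sketch of that contract (the names below are taken from the later hunks of this patch, not a complete listing):

	/* Send side: hooks the generic code calls to raise the IPIs
	 * (implemented for m32r in arch/m32r/kernel/smp.c below). */
	extern void arch_send_call_function_ipi(cpumask_t mask);
	extern void arch_send_call_function_single_ipi(int cpu);

	/* Receive side: the arch IPI handlers hand control back to the
	 * generic code, which runs the queued functions. */
	extern void generic_smp_call_function_interrupt(void);
	extern void generic_smp_call_function_single_interrupt(void);
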
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index e6709fe950ba..16bcb189a383 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -43,9 +43,6 @@ EXPORT_SYMBOL(dcache_dummy);
 #endif
 EXPORT_SYMBOL(cpu_data);
 
-/* Global SMP stuff */
-EXPORT_SYMBOL(smp_call_function);
-
 /* TLB flushing */
 EXPORT_SYMBOL(smp_flush_tlb_page);
 #endif
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index c837bc13b015..74eb7bcd5a40 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -35,22 +35,6 @@
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
 
 /*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
-
-static struct call_data_struct *call_data;
-
-/*
  * For flush_cache_all()
  */
 static DEFINE_SPINLOCK(flushcache_lock);
@@ -96,9 +80,6 @@ void smp_invalidate_interrupt(void);
 void smp_send_stop(void);
 static void stop_this_cpu(void *);
 
-int smp_call_function(void (*) (void *), void *, int, int);
-void smp_call_function_interrupt(void);
-
 void smp_send_timer(void);
 void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);
@@ -565,86 +546,14 @@ static void stop_this_cpu(void *dummy)
 	for ( ; ; );
 }
 
-/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-/* Call function Routines                                                    */
-/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-
-/*==========================================================================*
- * Name:	smp_call_function
- *
- * Description:  This routine sends a 'CALL_FUNCTION_IPI' to all other CPUs
- *		 in the system.
- *
- * Born on Date: 2002.02.05
- *
- * Arguments:	 *func - The function to run. This must be fast and
- *		 non-blocking.
- *		 *info - An arbitrary pointer to pass to the function.
- *		 nonatomic - currently unused.
- *		 wait - If true, wait (atomically) until function has
- *		 completed on other CPUs.
- *
- * Returns:	0 on success, else a negative status code. Does not return
- *		until remote CPUs are nearly ready to execute <<func>> or
- *		are or have executed.
- *
- * Cautions:	You must not call this function with disabled interrupts or
- *		from a hardware interrupt handler, you may call it from a
- *		bottom half handler.
- *
- * Modification log:
- * Date	     Who Description
- * ---------- --- --------------------------------------------------------
- *
- *==========================================================================*/
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-	int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpus;
-
-#ifdef DEBUG_SMP
-	unsigned long flags;
-	__save_flags(flags);
-	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
-		BUG();
-#endif /* DEBUG_SMP */
-
-	/* Holding any lock stops cpus from going down. */
-	spin_lock(&call_lock);
-	cpus = num_online_cpus() - 1;
-
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();
-
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(CALL_FUNCTION_IPI, 0);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		barrier();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			barrier();
-	spin_unlock(&call_lock);
+	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
+}
 
-	return 0;
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
 }
 
 /*==========================================================================*
@@ -666,27 +575,16 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
  *==========================================================================*/
 void smp_call_function_interrupt(void)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_interrupt();
 	irq_exit();
+}
 
-	if (wait) {
-		mb();
-		atomic_inc(&call_data->finished);
-	}
+void smp_call_function_single_interrupt(void)
+{
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+	irq_exit();
 }
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
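
With the hand-rolled smp_call_function() gone, m32r callers go through the generic implementation in kernel/smp.c, which queues the request and raises the IPIs via the two arch hooks added above. A minimal usage sketch, assuming the post-cleanup prototypes without the old nonatomic/retry argument (drain_local_queue is an illustrative callback, not part of this patch):

	#include <linux/smp.h>

	/* Runs in IPI context on each target CPU: must be fast and must not sleep. */
	static void drain_local_queue(void *info)
	{
		/* per-CPU work goes here */
	}

	static void example(void)
	{
		/* run on every other online CPU and wait for completion */
		smp_call_function(drain_local_queue, NULL, 1);

		/* run on one particular CPU (here CPU 0) and wait */
		smp_call_function_single(0, drain_local_queue, NULL, 1);
	}
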
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 89ba4a0b5d51..46159a4e644b 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -40,6 +40,7 @@ extern void smp_invalidate_interrupt(void);
 extern void smp_call_function_interrupt(void);
 extern void smp_ipi_timer_interrupt(void);
 extern void smp_flush_cache_all_interrupt(void);
+extern void smp_call_function_single_interrupt(void);
 
 /*
  * for Boot AP function
@@ -103,7 +104,7 @@ void set_eit_vector_entries(void)
 	eit_vector[186] = (unsigned long)smp_call_function_interrupt;
 	eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
 	eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
-	eit_vector[189] = 0;
+	eit_vector[189] = (unsigned long)smp_call_function_single_interrupt;
 	eit_vector[190] = 0;
 	eit_vector[191] = 0;
 #endif
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index 078e1a51a042..c5dd66916692 100644
--- a/include/asm-m32r/smp.h
+++ b/include/asm-m32r/smp.h
@@ -89,6 +89,9 @@ static __inline__ unsigned int num_booting_cpus(void)
 extern void smp_send_timer(void);
 extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #endif	/* not __ASSEMBLY__ */
 
 #define NO_PROC_ID (0xff)	/* No processor magic marker */
@@ -104,6 +107,7 @@ extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 #define LOCAL_TIMER_IPI		(M32R_IRQ_IPI3-M32R_IRQ_IPI0)
 #define INVALIDATE_CACHE_IPI	(M32R_IRQ_IPI4-M32R_IRQ_IPI0)
 #define CPU_BOOT_IPI		(M32R_IRQ_IPI5-M32R_IRQ_IPI0)
+#define CALL_FUNC_SINGLE_IPI	(M32R_IRQ_IPI6-M32R_IRQ_IPI0)
 
 #define IPI_SHIFT	(0)
 #define NR_IPIS		(8)
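
Taken together, a single-CPU cross call on m32r after this patch flows through the generic helpers end to end, with only the IPI plumbing left in architecture code. A sketch of the path using the names introduced above (step 1 is the generic kernel/smp.c entry point, not part of this diff):

	/*
	 * 1. caller:   smp_call_function_single(cpu, func, info, wait)
	 *              - generic code queues func/info for the target CPU
	 * 2. send:     arch_send_call_function_single_ipi(cpu)
	 *              -> send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0)
	 * 3. receive:  eit_vector[189] -> smp_call_function_single_interrupt()
	 * 4. dispatch: generic_smp_call_function_single_interrupt() runs the
	 *              queued func(info) on the target CPU
	 */
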