Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig                     |   1
-rw-r--r--  arch/mips/kernel/irq-rm9000.c         |   4
-rw-r--r--  arch/mips/kernel/smp.c                | 149
-rw-r--r--  arch/mips/kernel/smtc.c               |   1
-rw-r--r--  arch/mips/mm/c-r4k.c                  |  18
-rw-r--r--  arch/mips/oprofile/common.c           |   6
-rw-r--r--  arch/mips/pmc-sierra/yosemite/prom.c  |   2
-rw-r--r--  arch/mips/sibyte/cfe/setup.c          |   2
-rw-r--r--  arch/mips/sibyte/sb1250/prom.c        |   2
9 files changed, 33 insertions, 152 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 24c5dee91768..d2be3ffca280 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1763,6 +1763,7 @@ config SMP
 	bool "Multi-Processing support"
 	depends on SYS_SUPPORTS_SMP
 	select IRQ_PER_CPU
+	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index ed9febe63d72..b47e4615ec12 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -49,7 +49,7 @@ static void local_rm9k_perfcounter_irq_startup(void *args)
 
 static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq)
 {
-	on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 0, 1);
+	on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1);
 
 	return 0;
 }
@@ -66,7 +66,7 @@ static void local_rm9k_perfcounter_irq_shutdown(void *args)
 
 static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
 {
-	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
+	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1);
 }
 
 static struct irq_chip rm9k_irq_controller = {
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index cdf87a9dd4ba..4410f172b8ab 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -131,148 +131,29 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu_idle();
 }
 
-DEFINE_SPINLOCK(smp_call_lock);
-
-struct call_data_struct *call_data;
-
-/*
- * Run a function on all other CPUs.
- *
- *  <mask>	cpuset_t of all processors to run the function on.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <retry>	If true, keep retrying until ready.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler:
- *
- * CPU A				CPU B
- * Disable interrupts
- *					smp_call_function()
- *					Take call_lock
- *					Send IPIs
- *					Wait for all cpus to acknowledge IPI
- *					CPU A has not responded, spin waiting
- *					for cpu A to respond, holding call_lock
- * smp_call_function()
- * Spin waiting for call_lock
- * Deadlock				Deadlock
- */
-int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
-	void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpu = smp_processor_id();
-	int cpus;
-
-	/*
-	 * Can die spectacularly if this CPU isn't yet marked online
-	 */
-	BUG_ON(!cpu_online(cpu));
-
-	cpu_clear(cpu, mask);
-	cpus = cpus_weight(mask);
-	if (!cpus)
-		return 0;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock(&smp_call_lock);
-	call_data = &data;
-	smp_mb();
-
-	/* Send a message to all other CPUs and wait for them to respond */
 	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
-
-	/* Wait for response */
-	/* FIXME: lock-up detection, backtrace on lock-up */
-	while (atomic_read(&data.started) != cpus)
-		barrier();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			barrier();
-	call_data = NULL;
-	spin_unlock(&smp_call_lock);
-
-	return 0;
 }
 
-int smp_call_function(void (*func) (void *info), void *info, int retry,
-	int wait)
+/*
+ * We reuse the same vector for the single IPI
+ */
+void arch_send_call_function_single_ipi(int cpu)
 {
-	return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
+	mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
 }
-EXPORT_SYMBOL(smp_call_function);
 
+/*
+ * Call into both interrupt handlers, as we share the IPI for them
+ */
 void smp_call_function_interrupt(void)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function.
-	 */
-	smp_mb();
-	atomic_inc(&call_data->started);
-
-	/*
-	 * At this point the info structure may be out of scope unless wait==1.
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_single_interrupt();
+	generic_smp_call_function_interrupt();
 	irq_exit();
-
-	if (wait) {
-		smp_mb();
-		atomic_inc(&call_data->finished);
-	}
-}
-
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-	int retry, int wait)
-{
-	int ret, me;
-
-	/*
-	 * Can die spectacularly if this CPU isn't yet marked online
-	 */
-	if (!cpu_online(cpu))
-		return 0;
-
-	me = get_cpu();
-	BUG_ON(!cpu_online(me));
-
-	if (cpu == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
-		wait);
-
-	put_cpu();
-	return 0;
 }
-EXPORT_SYMBOL(smp_call_function_single);
 
 static void stop_this_cpu(void *dummy)
 {
@@ -286,7 +167,7 @@ static void stop_this_cpu(void *dummy)
 
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -365,7 +246,7 @@ static void flush_tlb_all_ipi(void *info)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
+	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
@@ -385,7 +266,7 @@ static void flush_tlb_mm_ipi(void *mm)
 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
 {
 #ifndef CONFIG_MIPS_MT_SMTC
-	smp_call_function(func, info, 1, 1);
+	smp_call_function(func, info, 1);
 #endif
 }
 
@@ -485,7 +366,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		.addr2 = end,
 	};
 
-	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
+	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
 }
 
 static void flush_tlb_page_ipi(void *info)
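
Not part of the patch, but useful orientation: after the smp.c conversion above, MIPS supplies only the IPI transport (arch_send_call_function_ipi / arch_send_call_function_single_ipi) and the interrupt-side drain; the queueing and wait/ack protocol that the deleted call_data/smp_call_lock code implemented now live in the generic kernel/smp.c helpers pulled in by USE_GENERIC_SMP_HELPERS. A minimal sketch of the resulting flow, assuming a hypothetical handler remote_tick() (names not from this patch):

/* Hypothetical handler: runs on the target CPU in IPI context,
   so it must be fast and must not sleep. */
static void remote_tick(void *info)
{
	/* per-CPU work goes here */
}

/* Caller, any CPU with interrupts enabled: the generic helper queues
   the request and then invokes arch_send_call_function_single_ipi(cpu),
   defined in the + side above. */
smp_call_function_single(cpu, remote_tick, NULL, 1);	/* wait == 1 */

/* Target CPU: the shared SMP_CALL_FUNCTION vector lands in
   smp_call_function_interrupt(), which drains both generic queues
   (single and mask) and runs remote_tick(NULL) with IRQs off. */
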
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 3e863186cd22..a516286532ab 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -877,7 +877,6 @@ static void ipi_resched_interrupt(void)
 	/* Return from interrupt should be enough to cause scheduler check */
 }
 
-
 static void ipi_call_interrupt(void)
 {
 	/* Invoke generic function invocation code in smp.c */
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 27096751ddce..71df3390c07b 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -43,12 +43,12 @@
  * primary cache.
  */
 static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
-                                   int retry, int wait)
+                                   int wait)
 {
 	preempt_disable();
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
-	smp_call_function(func, info, retry, wait);
+	smp_call_function(func, info, wait);
 #endif
 	func(info);
 	preempt_enable();
@@ -350,7 +350,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 
 static void r4k___flush_cache_all(void)
 {
-	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
 }
 
 static inline int has_valid_asid(const struct mm_struct *mm)
@@ -397,7 +397,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
 	int exec = vma->vm_flags & VM_EXEC;
 
 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)
@@ -429,7 +429,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
 	if (!cpu_has_dc_aliases)
 		return;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
 }
 
 struct flush_cache_page_args {
@@ -521,7 +521,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
 	args.addr = addr;
 	args.pfn = pfn;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -535,7 +535,7 @@ static void r4k_flush_data_cache_page(unsigned long addr)
 		local_r4k_flush_data_cache_page((void *)addr);
 	else
 		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
-			1, 1);
+			1);
 }
 
 struct flush_icache_range_args {
@@ -571,7 +571,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	args.start = start;
 	args.end = end;
 
-	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1);
 	instruction_hazard();
 }
 
@@ -672,7 +672,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
 }
 
 static void r4k_flush_icache_all(void)
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index b5f6f71b27bc..dd2fbd6645c1 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -27,7 +27,7 @@ static int op_mips_setup(void)
 	model->reg_setup(ctr);
 
 	/* Configure the registers on all cpus. */
-	on_each_cpu(model->cpu_setup, NULL, 0, 1);
+	on_each_cpu(model->cpu_setup, NULL, 1);
 
 	return 0;
 }
@@ -58,7 +58,7 @@ static int op_mips_create_files(struct super_block * sb, struct dentry * root)
 
 static int op_mips_start(void)
 {
-	on_each_cpu(model->cpu_start, NULL, 0, 1);
+	on_each_cpu(model->cpu_start, NULL, 1);
 
 	return 0;
 }
@@ -66,7 +66,7 @@ static int op_mips_start(void)
 static void op_mips_stop(void)
 {
 	/* Disable performance monitoring for all counters. */
-	on_each_cpu(model->cpu_stop, NULL, 0, 1);
+	on_each_cpu(model->cpu_stop, NULL, 1);
 }
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c
index 35dc435846a6..cf4c868715ac 100644
--- a/arch/mips/pmc-sierra/yosemite/prom.c
+++ b/arch/mips/pmc-sierra/yosemite/prom.c
@@ -64,7 +64,7 @@ static void prom_exit(void)
 #ifdef CONFIG_SMP
 	if (smp_processor_id())
 		/* CPU 1 */
-		smp_call_function(prom_cpu0_exit, NULL, 1, 1);
+		smp_call_function(prom_cpu0_exit, NULL, 1);
 #endif
 	prom_cpu0_exit(NULL);
 }
diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c
index 33fce826f8bf..fd9604d5555a 100644
--- a/arch/mips/sibyte/cfe/setup.c
+++ b/arch/mips/sibyte/cfe/setup.c
@@ -74,7 +74,7 @@ static void __noreturn cfe_linux_exit(void *arg)
 		if (!reboot_smp) {
 			/* Get CPU 0 to do the cfe_exit */
 			reboot_smp = 1;
-			smp_call_function(cfe_linux_exit, arg, 1, 0);
+			smp_call_function(cfe_linux_exit, arg, 0);
 		}
 	} else {
 		printk("Passing control back to CFE...\n");
diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c
index cf8f6b3de86c..65b1af66b674 100644
--- a/arch/mips/sibyte/sb1250/prom.c
+++ b/arch/mips/sibyte/sb1250/prom.c
@@ -66,7 +66,7 @@ static void prom_linux_exit(void)
 {
 #ifdef CONFIG_SMP
 	if (smp_processor_id()) {
-		smp_call_function(prom_cpu0_exit, NULL, 1, 1);
+		smp_call_function(prom_cpu0_exit, NULL, 1);
 	}
 #endif
 	while(1);
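
Not part of the patch: the mechanical change repeated at every call site above is the removal of the retry argument (ignored by the MIPS implementation deleted earlier), leaving only the wait flag. A before/after sketch for a hypothetical handler do_flush() (name not from this patch):

/* before this series: (func, info, retry, wait) */
on_each_cpu(do_flush, NULL, 0, 1);
smp_call_function(do_flush, NULL, 1, 1);

/* after: (func, info, wait) -- wait == 1 blocks until every
   targeted CPU has finished running do_flush() */
on_each_cpu(do_flush, NULL, 1);
smp_call_function(do_flush, NULL, 1);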